krsnaman committed on
Commit c629a07
1 Parent(s): bd3648a

Update IndicQuestionGeneration.py

Files changed (1)
  IndicQuestionGeneration.py  +112 -112
IndicQuestionGeneration.py CHANGED
@@ -1,112 +1,112 @@
-import json
-import os
-
-import datasets
-
-_CITATION = """\
-@inproceedings{Kumar2022IndicNLGSM,
-  title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
-  author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
-  year={2022},
-  url = "https://arxiv.org/abs/2203.05437"
-}
-"""
-
-_DESCRIPTION = """\
-This is the Question Generation dataset released as part of IndicNLG Suite. Each
-example has five fields: id, squad_id, answer, context and question. We create this dataset in eleven
-languages including as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. This is a translated data. The examples in each language are exactly similar but in different languages.
-The number of examples in each language is 98,027.
-"""
-_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"
-
-_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"
-
-_URL = "https://huggingface.co/datasets/ai4bharat/IndicQuestionGeneration/resolve/main/data/{}_IndicQuestionGeneration_v{}.tar.bz2"
-
-
-_LANGUAGES = [
-    "as",
-    "bn",
-    "gu",
-    "hi",
-    "kn",
-    "ml",
-    "mr",
-    "or",
-    "pa",
-    "ta",
-    "te"
-]
-
-
-class WikiBio(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="{}".format(lang),
-            version=datasets.Version("1.0.0")
-        )
-        for lang in _LANGUAGES
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "squad_id": datasets.Value("string"),
-                    "answer": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string")
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-            license=_LICENSE,
-            version=self.VERSION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        lang = str(self.config.name)
-        url = _URL.format(lang, self.VERSION.version_str[:-2])
-
-        data_dir = dl_manager.download_and_extract(url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, lang + "_train" + ".jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, lang + "_test" + ".jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, lang + "_val" + ".jsonl"),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples as (key, example) tuples."""
-        with open(filepath, encoding="utf-8") as f:
-            for idx_, row in enumerate(f):
-                data = json.loads(row)
-                yield idx_, {
-                    "id": data["id"],
-                    "squad_id": data["squad_id"],
-                    "answer": data["answer"],
-                    "context": data["context"],
-                    "question": data["question"]
-
-                }
+import json
+import os
+
+import datasets
+
+_CITATION = """\
+@inproceedings{Kumar2022IndicNLGSM,
+  title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
+  author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
+  year={2022},
+  url = "https://arxiv.org/abs/2203.05437"
+}
+"""
+
+_DESCRIPTION = """\
+This is the Question Generation dataset released as part of IndicNLG Suite. Each
+example has five fields: id, squad_id, answer, context and question. We create this dataset in eleven
+languages including as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. This is a translated data. The examples in each language are exactly similar but in different languages.
+The number of examples in each language is 98,027.
+"""
+_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"
+
+_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"
+
+_URL = "https://huggingface.co/datasets/ai4bharat/IndicQuestionGeneration/resolve/main/data/{}_QuestionGeneration_v{}.zip"
+
+
+_LANGUAGES = [
+    "as",
+    "bn",
+    "gu",
+    "hi",
+    "kn",
+    "ml",
+    "mr",
+    "or",
+    "pa",
+    "ta",
+    "te"
+]
+
+
+class QuestionGeneration(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="{}".format(lang),
+            version=datasets.Version("1.0.0")
+        )
+        for lang in _LANGUAGES
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "squad_id": datasets.Value("string"),
+                    "answer": datasets.Value("string"),
+                    "context": datasets.Value("string"),
+                    "question": datasets.Value("string")
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            license=_LICENSE,
+            version=self.VERSION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        lang = str(self.config.name)
+        url = _URL.format(lang, self.VERSION.version_str[:-2])
+
+        data_dir = dl_manager.download_and_extract(url)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_train" + ".jsonl"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_test" + ".jsonl"),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, lang + "_val" + ".jsonl"),
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples as (key, example) tuples."""
+        with open(filepath, encoding="utf-8") as f:
+            for idx_, row in enumerate(f):
+                data = json.loads(row)
+                yield idx_, {
+                    "id": data["id"],
+                    "squad_id": data["squad_id"],
+                    "answer": data["answer"],
+                    "context": data["context"],
+                    "question": data["question"]
+
+                }
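
The commit renames the builder class from WikiBio to QuestionGeneration and points _URL at a {}_QuestionGeneration_v{}.zip archive instead of the earlier tar.bz2. A minimal usage sketch (not part of the commit) for loading one language config with the Hugging Face datasets library follows; the repository id ai4bharat/IndicQuestionGeneration is taken from _URL above, and "hi" is one of the codes in _LANGUAGES.

# Minimal usage sketch, assuming the archives referenced by _URL resolve for this config.
from datasets import load_dataset

# Load the Hindi ("hi") config; any other code in _LANGUAGES works the same way.
ds = load_dataset("ai4bharat/IndicQuestionGeneration", "hi")

print(ds)                      # expected splits: train, validation, test
print(ds["train"][0].keys())   # expected fields: id, squad_id, answer, context, question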