Datasets: lmqg/qg_zhquad
Languages: Chinese
asahi417 committed
Commit eee0edd (1 parent: 45ad4c0)
.gitattributes CHANGED
@@ -53,3 +53,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/processed/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/validation.jsonl filter=lfs diff=lfs merge=lfs -text
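
The three added lines route the processed JSONL splits through Git LFS, alongside the pre-existing wildcard rules for images. A minimal sketch, not part of the commit, of how such patterns resolve, approximating Git's attribute matching with `fnmatch` (Git's real matcher differs in edge cases):

```python
from fnmatch import fnmatch

LFS_PATTERNS = [
    "*.jpg", "*.jpeg", "*.webp",       # pre-existing wildcard rules
    "data/processed/test.jsonl",       # rules added by this commit
    "data/processed/train.jsonl",
    "data/processed/validation.jsonl",
]

def tracked_by_lfs(path: str) -> bool:
    # Patterns without "/" match the basename at any depth;
    # patterns containing "/" match the path relative to the repo root.
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(path if "/" in p else name, p) for p in LFS_PATTERNS)

assert tracked_by_lfs("data/processed/train.jsonl")
assert not tracked_by_lfs("process.py")
```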
data/processed/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c97be8ad690988b11ae238bdbb26da763b28334ab6894185337037fa69b2c0d
+size 38396960
data/processed/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8debcac1fbd7bc09cad41e51fa2a87b77cb69723460a70fbc167bb4431a82427
+size 279350289
data/processed/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04ba1bbd2cc7af963535d98dbbb86ed44f99748355f1243c3ff9b4c8fc7dc3d8
+size 39458227
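
Each of the three files added above is a Git LFS pointer stub rather than the data itself: `version`, `oid`, and `size` fields, one `key value` pair per line. A minimal sketch of decoding such a pointer from a checkout made without `git lfs`:

```python
def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file ("key value" per line) into a dict."""
    with open(path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    return fields

# e.g. read_lfs_pointer("data/processed/train.jsonl") on a non-LFS checkout
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:8debcac1...", "size": "279350289"}
```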
format_squad.py ADDED
@@ -0,0 +1,44 @@
+import json
+import os
+from random import shuffle, seed
+
+
+def get_dict(filepath):
+    """Flatten a SQuAD-format JSON file into {answer, context, question} records."""
+    output = []
+    with open(filepath) as f:
+        tmp = json.load(f)["data"]
+        for t in tmp:
+            for x in t["paragraphs"]:
+                context = x["context"]
+                for qa in x["qas"]:
+                    if qa["is_impossible"]:
+                        continue
+                    answers = qa["answers"]
+                    if len(answers) == 0:
+                        continue
+                    answer = answers[0]["text"]
+                    if answer not in context:
+                        continue
+                    output.append({
+                        "answer": answer,
+                        "context": context,
+                        "question": qa["question"]
+                    })
+    return output
+
+
+train = get_dict("dataset/train-zen-v1.0.json")
+dev = get_dict("dataset/dev-zen-v1.0.json")
+seed(42)
+shuffle(train)
+# carve a test split the same size as dev out of the shuffled training data
+test = train[:len(dev)]
+train = train[len(dev):]
+os.makedirs("data", exist_ok=True)  # bug fix: `os` was imported but the output directory was never created
+with open("data/raw.train.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(x) for x in train]))
+with open("data/raw.valid.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(x) for x in dev]))
+with open("data/raw.test.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(x) for x in test]))
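
`format_squad.py` flattens the SQuAD-format Chinese files into one JSON object per line, keeping only answerable questions whose answer span occurs verbatim in the context, and carves a test split the size of dev out of the shuffled training data. One line of the resulting `data/raw.*.jsonl` would look like this (illustrative values, not drawn from the dataset; the script leaves `ensure_ascii` at its default, so the committed files store escaped Unicode):

```python
import json

record = {
    "answer": "北京",                                      # must occur verbatim in context
    "context": "北京是中华人民共和国的首都，也是其政治与文化中心。",
    "question": "中华人民共和国的首都是哪座城市？",
}
print(json.dumps(record, ensure_ascii=False))
```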
process.py ADDED
@@ -0,0 +1,100 @@
+""" Script to process the raw SQuAD-format files into the question generation (QG) format.
+You need to run `python -m spacy download zh_core_web_sm` first.
+When uploading to the dataset hub, split the files with:
+```
+gsplit -l 3300 -d --additional-suffix=.jsonl train.jsonl train
+gsplit -l 3300 -d --additional-suffix=.jsonl test.jsonl test
+gsplit -l 3300 -d --additional-suffix=.jsonl dev.jsonl dev
+```
+"""
+import json
+import os
+import re
+from glob import glob
+from tqdm import tqdm
+from typing import Dict
+
+import spacy
+
+SPLITTER = spacy.load('zh_core_web_sm')
+HIGHLIGHT_TOKEN = '<hl>'
+
+
+def get_sentence(document: str):
+    return [str(s) for s in SPLITTER(document).sents]
+
+
+def jsonline_reader(filename: str):
+    with open(filename, 'r') as f_reader:
+        examples = [json.loads(i) for i in f_reader.read().split('\n') if len(i) > 0]
+    return examples
+
+
+def process_single_data(data: Dict):
+    """ Convert a single raw json record into the QG format """
+    example = {'question': data["question"], 'paragraph': data["context"], 'answer': data["answer"]}
+
+    # get sentence
+    position = example['paragraph'].find(example['answer'])
+    assert position != -1
+    before_tmp = get_sentence(example['paragraph'][:position])
+    if len(before_tmp) == 0:
+        before = ''
+        before_sentence = ''
+    else:
+        if before_tmp[-1].endswith('.'):
+            before = ' '.join(before_tmp)
+            before_sentence = ''
+        else:
+            before = ' '.join(before_tmp[:-1])
+            before_sentence = before_tmp[-1]
+            before_sentence = before_sentence if before_sentence.endswith(' ') else '{} '.format(before_sentence)
+    after_tmp = get_sentence(example['paragraph'][position + len(example['answer']):])
+    if len(after_tmp) == 0:
+        after = ''
+        after_sentence = ''
+    else:
+        after = ' '.join(after_tmp[1:])
+        after_sentence = after_tmp[0]
+        after_sentence = after_sentence if after_sentence.startswith(' ') else ' {}'.format(after_sentence)
+    example['sentence'] = '{}{}{}'.format(before_sentence, example['answer'], after_sentence)
+
+    # get paragraph_sentence
+    before = '' if before == '' else '{} '.format(before)
+    after = '' if after == '' else ' {}'.format(after)
+    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
+    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
+
+    # get paragraph_answer
+    source_text = '{0}{1} {2} {1}{3}'.format(
+        example['paragraph'][:position], HIGHLIGHT_TOKEN, example['answer'],
+        example['paragraph'][position + len(example['answer']):])
+    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
+
+    # get sentence_answer
+    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
+        before = ''
+    else:
+        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else '{} '.format(before_tmp[-1])
+    if len(after_tmp) == 0:
+        after = ''
+    else:
+        after = after_tmp[0] if after_tmp[0].startswith(' ') else ' {}'.format(after_tmp[0])
+    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
+    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
+
+    return example
+
+
+if __name__ == '__main__':
+    output = './data/processed'
+    os.makedirs(output, exist_ok=True)
+    path = {'train': 'data/raw.train.jsonl', 'validation': 'data/raw.valid.jsonl', 'test': 'data/raw.test.jsonl'}
+    for k, v in path.items():
+        json_data = []
+        for _file in sorted(glob(v)):
+            json_data += jsonline_reader(_file)
+        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
+            for single_data in tqdm(json_data):
+                single_data = process_single_data(single_data)
+                f.write(json.dumps(single_data) + '\n')
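
`process.py` turns each raw record into the lmqg QG input formats: `paragraph_answer` wraps the answer span in `<hl>` tokens inside the full paragraph, `paragraph_sentence` highlights the whole sentence containing the answer, and `sentence_answer` highlights the answer within that sentence alone. A toy illustration of the `paragraph_answer` construction, reusing the script's own format string on a hypothetical record:

```python
import re

HIGHLIGHT_TOKEN = '<hl>'

paragraph = "巴黎是法国的首都。它位于塞纳河畔。"   # hypothetical paragraph/answer pair
answer = "法国"
position = paragraph.find(answer)
source_text = '{0}{1} {2} {1}{3}'.format(
    paragraph[:position], HIGHLIGHT_TOKEN, answer,
    paragraph[position + len(answer):])
print(re.sub(r'\s+', ' ', source_text))
# -> 巴黎是<hl> 法国 <hl>的首都。它位于塞纳河畔。
```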
qg_zhquad.py ADDED
@@ -0,0 +1,83 @@
+""" python -c "from datasets import load_dataset;load_dataset('.')" """
+import json
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+_VERSION = "0.0.0"
+_NAME = "qg_zhquad"
+_CITATION = """
+@inproceedings{ushio-etal-2022-generative,
+    title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
+    author = "Ushio, Asahi and
+      Alva-Manchego, Fernando and
+      Camacho-Collados, Jose",
+    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
+    month = dec,
+    year = "2022",
+    address = "Abu Dhabi, U.A.E.",
+    publisher = "Association for Computational Linguistics",
+}
+"""
+_DESCRIPTION = """[Chinese SQuAD](https://github.com/junzeng-pluto/ChineseSquad) dataset for question generation (QG) task."""
+_URL = 'https://huggingface.co/datasets/lmqg/qg_zhquad/resolve/main/data/processed'
+_URLS = {
+    str(datasets.Split.TEST): [f'{_URL}/test.jsonl'],
+    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
+    str(datasets.Split.VALIDATION): [f'{_URL}/validation.jsonl'],
+}
+
+
+class QGZHQuADConfig(datasets.BuilderConfig):
+    """BuilderConfig for SquadQG"""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for SquadQG.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(QGZHQuADConfig, self).__init__(**kwargs)
+
+
+class QGZHQuAD(datasets.GeneratorBasedBuilder):
+
+    BUILDER_CONFIGS = [
+        QGZHQuADConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "answer": datasets.Value("string"),
+                    "paragraph_question": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "sentence": datasets.Value("string"),
+                    "paragraph": datasets.Value("string"),
+                    "sentence_answer": datasets.Value("string"),
+                    "paragraph_answer": datasets.Value("string"),
+                    "paragraph_sentence": datasets.Value("string"),
+                    "paragraph_id": datasets.Value("string")
+                }
+            ),
+            supervised_keys=None,
+            homepage="https://github.com/asahi417/lm-question-generation"
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_file = dl_manager.download_and_extract(_URLS)
+        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
+
+    def _generate_examples(self, filepaths):
+        _key = 0
+        for filepath in filepaths:
+            logger.info("generating examples from = %s", filepath)
+            with open(filepath, encoding="utf-8") as f:
+                _list = f.read().split('\n')
+                if _list[-1] == '':
+                    _list = _list[:-1]
+                for i in _list:
+                    data = json.loads(i)
+                    yield _key, data
+                    _key += 1
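
With the loading script in place, the splits listed in `_URLS` can be pulled straight from the hub; a minimal usage sketch (the module docstring above shows the equivalent local-path invocation):

```python
from datasets import load_dataset

dataset = load_dataset("lmqg/qg_zhquad")
print(dataset["train"][0]["paragraph_answer"])  # answer span wrapped in <hl> tokens
```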