shibing624 committed on
Commit
d5e00e4
1 Parent(s): c8897a2
Files changed (1)
  1. nli_zh.py +107 -26
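The rewrite drops the SNLI-style `csv.DictReader` in favor of a plain tab-split over per-task `.data` files, so every row is expected to look like `sentence1<TAB>sentence2<TAB>label`. A minimal sketch of that row format and the parse the new `_generate_examples` performs (the sample pair below is illustrative, not drawn from the corpus):

    # A hypothetical row from ATEC.train.data; for ATEC, label 1 = similar, 0 = not.
    row = "花呗如何还款\t花呗怎么还款\t1\n"

    # Split on tabs and cast the trailing label field to int,
    # mirroring the loader's _generate_examples below.
    sentence1, sentence2, label = row.strip().split("\t")
    example = {"sentence1": sentence1, "sentence2": sentence2, "label": int(label)}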
nli_zh.py CHANGED
@@ -1,65 +1,146 @@
-"""Natural Language Inference (NLI) Chinese Corpus.(nli_zh)"""
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing([email protected])
+@description:
+"""
+
+"""Natural Language Inference (NLI) Chinese Corpus (nli_zh)."""
 
-import csv
 import os
 
 import datasets
 
-_DESCRIPTION = """\
-Common Chinese semantic matching datasets, covering five tasks: ATEC, BQ, LCQMC, PAWSX, and STS-B.
-"""
+_DESCRIPTION = """Plain-text data in the format (sentence1, sentence2, label). Common Chinese semantic matching datasets, covering five tasks: ATEC, BQ, LCQMC, PAWSX, and STS-B."""
+
+ATEC_HOME = "https://github.com/IceFlameWorm/NLP_Datasets/tree/master/ATEC"
+BQ_HOME = "http://icrc.hitsz.edu.cn/info/1037/1162.htm"
+LCQMC_HOME = "http://icrc.hitsz.edu.cn/Article/show/171.html"
+PAWSX_HOME = "https://arxiv.org/abs/1908.11828"
+STSB_HOME = "https://github.com/pluto-junzeng/CNSD"
+
+_CITATION = "https://github.com/shibing624/text2vec"
+
+_DATA_URL = "https://github.com/shibing624/text2vec/releases/download/1.1.2/senteval_cn.zip"
+
+
+class NliZhConfig(datasets.BuilderConfig):
+    """BuilderConfig for NLI_zh"""
+
+    def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
+        """BuilderConfig for NLI_zh
+        Args:
+            features: `list[string]`, list of the features that will appear in the
+                feature dict. Should not include "label".
+            data_url: `string`, url to download the zip file from.
+            citation: `string`, citation for the data set.
+            url: `string`, url for information about the data set.
+            label_classes: `list[int]`, similar pairs are 1, else 0.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+        self.features = features
+        self.label_classes = label_classes
+        self.data_url = data_url
+        self.citation = citation
+        self.url = url
 
-class Nli_zh(datasets.GeneratorBasedBuilder):
+
+class NliZh(datasets.GeneratorBasedBuilder):
     """The Natural Language Inference Chinese(NLI_zh) Corpus."""
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
+        NliZhConfig(
             name="ATEC",
-            version=datasets.Version("1.0.0", ""),
-            description="Plain text import of NLI_zh",
-        )
+            description=_DESCRIPTION,
+            features=["sentence1", "sentence2"],
+            data_url=_DATA_URL,
+            citation=_CITATION,
+            url=ATEC_HOME,
+        ),
+        NliZhConfig(
+            name="BQ",
+            description=_DESCRIPTION,
+            features=["sentence1", "sentence2"],
+            data_url=_DATA_URL,
+            citation=_CITATION,
+            url=BQ_HOME,
+        ),
+        NliZhConfig(
+            name="LCQMC",
+            description=_DESCRIPTION,
+            features=["sentence1", "sentence2"],
+            data_url=_DATA_URL,
+            citation=_CITATION,
+            url=LCQMC_HOME,
+        ),
+        NliZhConfig(
+            name="PAWSX",
+            description=_DESCRIPTION,
+            features=["sentence1", "sentence2"],
+            data_url=_DATA_URL,
+            citation=_CITATION,
+            url=PAWSX_HOME,
+        ),
+        NliZhConfig(
+            name="STS-B",
+            description=_DESCRIPTION,
+            features=["sentence1", "sentence2"],
+            data_url=_DATA_URL,
+            citation=_CITATION,
+            url=STSB_HOME,
+        ),
     ]
 
     def _info(self):
         return datasets.DatasetInfo(
-            description=_DESCRIPTION,
+            description=self.config.description,
             features=datasets.Features(
                 {
                     "sentence1": datasets.Value("string"),
                     "sentence2": datasets.Value("string"),
                     "label": datasets.Value("int32"),
+                    # "idx": datasets.Value("int32"),
                 }
             ),
-            supervised_keys=None,
-            homepage="https://github.com/shibing624/text2vec",
+            homepage=self.config.url,
+            citation=self.config.citation,
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        data_dir = os.path.join(dl_dir, "nli_zh")
+        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
+        dl_dir = os.path.join(dl_dir, f"senteval_cn/{self.config.name}")
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_test.txt")}
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": os.path.join(dl_dir, f"{self.config.name}.train.data")},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_dev.txt")}
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": os.path.join(dl_dir, f"{self.config.name}.valid.data")},
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_train.txt")}
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": os.path.join(dl_dir, f"{self.config.name}.test.data")},
             ),
         ]
 
     def _generate_examples(self, filepath):
         """This function returns the examples in the raw (text) form."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for idx, row in enumerate(reader):
-                label = -1 if row["gold_label"] == "-" else row["gold_label"]
+        with open(filepath, "r", encoding="utf-8") as f:
+            for idx, row in enumerate(f):
+                # Each line is "sentence1\tsentence2\tlabel".
+                terms = row.strip().split("\t")
                 yield idx, {
-                    "premise": row["sentence1"],
-                    "hypothesis": row["sentence2"],
-                    "label": label,
+                    "sentence1": terms[0],
+                    "sentence2": terms[1],
+                    "label": int(terms[2]),
                 }
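For reference, a minimal usage sketch, assuming the script is consumed from the Hugging Face Hub as `shibing624/nli_zh` (the config names match BUILDER_CONFIGS above):

    from datasets import load_dataset

    # Any of the five configs works: ATEC, BQ, LCQMC, PAWSX, STS-B.
    dataset = load_dataset("shibing624/nli_zh", "STS-B")

    # Splits mirror the three SplitGenerators: train, validation, test.
    print(dataset["train"][0])  # {'sentence1': ..., 'sentence2': ..., 'label': ...}

Note that `label` is 0/1 for the matching tasks but an integer 0-5 similarity score for STS-B, which the loose `int32` feature type accommodates.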