Sampson2022 committed on
Commit
1964298
1 Parent(s): dcd7e33

Update demo2.py

Browse files
Files changed (1) hide show
  1. demo2.py +9 -4
demo2.py CHANGED
@@ -25,7 +25,8 @@ from datasets import TextClassification
25
  logger = datasets.logging.get_logger(__name__)
26
 
27
 
28
- _CITATION = """\n@article{2016arXiv160605250R,
 
29
  author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
30
  Konstantin and {Liang}, Percy},
31
  title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
@@ -38,10 +39,14 @@ archivePrefix = {arXiv},
38
  }
39
  """
40
 
41
- _DESCRIPTION = """\nStanford Question Answering Dataset (SQuAD) is a reading comprehension \ndataset, consisting of questions posed by crowdworkers on a set of Wikipedia \narticles, where the answer to every question is a segment of text, or span, \nfrom the corresponding reading passage, or the question might be unanswerable.
 
 
 
 
42
  """
43
 
44
- train_url = "https://raw.githubusercontent.com/Sampson2016/test/master/train.csv?token=GHSAT0AAAAAABR4XKTHOQNDJLYATOR3QTUKYVQLKGQ"
45
  test_url = "https://raw.githubusercontent.com/Sampson2016/test/master/test.csv?token=GHSAT0AAAAAABR4XKTHTE445FP64IQZAHE4YVQLKUQ"
46
 
47
  _URLS = {
@@ -91,7 +96,7 @@ class Demo2(datasets.GeneratorBasedBuilder):
91
 
92
  return [
93
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
94
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
95
  ]
96
 
97
  def _generate_examples(self, filepath):
 
25
  logger = datasets.logging.get_logger(__name__)
26
 
27
 
28
+ _CITATION = """
29
+ @article{2016arXiv160605250R,
30
  author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
31
  Konstantin and {Liang}, Percy},
32
  title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
 
39
  }
40
  """
41
 
42
+ _DESCRIPTION = """
43
+ Stanford Question Answering Dataset (SQuAD) is a reading comprehension
44
+ dataset, consisting of questions posed by crowdworkers on a set of Wikipedia
45
+ articles, where the answer to every question is a segment of text, or span,
46
+ from the corresponding reading passage, or the question might be unanswerable.
47
  """
48
 
49
+ train_url = "https://raw.githubusercontent.com/Sampson2016/test/master/train.csv?token=GHSAT0AAAAAABR4XKTH73T5VNFVZ3KS33FYYVQLQAA"
50
  test_url = "https://raw.githubusercontent.com/Sampson2016/test/master/test.csv?token=GHSAT0AAAAAABR4XKTHTE445FP64IQZAHE4YVQLKUQ"
51
 
52
  _URLS = {
 
96
 
97
  return [
98
  datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
99
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
100
  ]
101
 
102
  def _generate_examples(self, filepath):