Commit 96b5bb6 by David Wadden
Parent(s): 8bf74e5

COVID-Fact entailment.
Files changed: README.md (+13 −19), covidfact_entailment.py (+26 −22)
README.md CHANGED
```diff
@@ -1,15 +1,14 @@
 ---
 annotations_creators:
 - expert-generated
-language:
-- en
 language_creators:
 - found
+language:
+- en
 license:
 - cc-by-nc-2.0
 multilinguality:
 - monolingual
-pretty_name: SciFact
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -18,7 +17,7 @@ task_categories:
 - text-classification
 task_ids:
 - fact-checking
-
+pretty_name: CovidFact
 dataset_info:
   features:
   - name: claim_id
@@ -37,17 +36,17 @@ dataset_info:
     sequence: int32
   splits:
   - name: train
-    num_bytes:
-    num_examples:
-  - name:
-    num_bytes:
-    num_examples:
-  download_size:
-  dataset_size:
+    num_bytes: 1547185
+    num_examples: 940
+  - name: test
+    num_bytes: 523542
+    num_examples: 317
+  download_size: 3610222
+  dataset_size: 2070727
 ---
 
 
-# Dataset Card for "
+# Dataset Card for "covidfact_entailment"
 
 ## Table of Contents
 
@@ -59,17 +58,12 @@ dataset_info:
 
 ## Dataset Description
 
-- **
-- **Repository:** <https://github.com/allenai/scifact>
-- **Paper:** [Fact or Fiction: Verifying Scientific Claims](https://aclanthology.org/2020.emnlp-main.609/)
+- **Repository:** <https://github.com/asaakyan/covidfact>
 - **Point of Contact:** [David Wadden](mailto:[email protected])
 
 ### Dataset Summary
 
-
-
-For more information on the dataset, see [allenai/scifact](https://huggingface.co/datasets/allenai/scifact).
-This has the same data, but reformatted as an entailment task. A single instance includes a claim paired with a paper title and abstract, together with an entailment label and a list of evidence sentences (if any).
+COVID-FACT is a dataset of claims about COVID-19. For this version of the dataset, we follow the preprocessing from the MultiVerS modeling paper https://github.com/dwadden/multivers, verifying claims against abstracts of scientific research articles. Entailment labels and rationales are included.
 
 ## Dataset Structure
 
```
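The new metadata above declares train and test splits (940 and 317 examples). As a quick check of what this commit publishes, here is a minimal loading sketch with the `datasets` library; the repo id `dwadden/covidfact_entailment` and the `trust_remote_code=True` flag are assumptions, not shown in this commit.

```python
# Minimal loading sketch. Assumptions not in the commit: the repo id
# "dwadden/covidfact_entailment", and trust_remote_code=True (recent
# `datasets` releases require it for script-based datasets like this one).
from datasets import load_dataset

ds = load_dataset("dwadden/covidfact_entailment", trust_remote_code=True)
print(ds)                          # expected splits: train (940), test (317)
print(ds["train"][0]["claim_id"])  # "claim_id" is an int32 per the YAML above
```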
covidfact_entailment.py CHANGED
```diff
@@ -7,27 +7,30 @@ import json
 
 
 _CITATION = """\
-@
-title={Fact
-author={
-
-year={
+@article{Saakyan2021COVIDFactFE,
+  title={COVID-Fact: Fact Extraction and Verification of Real-World Claims on COVID-19 Pandemic},
+  author={Arkadiy Saakyan and Tuhin Chakrabarty and Smaranda Muresan},
+  journal={ArXiv},
+  year={2021},
+  volume={abs/2106.03794},
+  url={https://api.semanticscholar.org/CorpusID:235364036}
 }
 """
 
+
 _DESCRIPTION = """\
-
+COVID-FACT is a dataset of claims about COVID-19. For this version of the dataset, we follow the preprocessing from the MultiVerS modeling paper https://github.com/dwadden/multivers, verifying claims against abstracts of scientific research articles. Entailment labels and rationales are included.
 """
 
-_URL = "https://scifact.s3
+_URL = "https://scifact.s3.us-west-2.amazonaws.com/longchecker/latest/data.tar.gz"
 
 
 def flatten(xss):
     return [x for xs in xss for x in xs]
 
 
-class
-    """
+class CovidFactEntailmentConfig(datasets.BuilderConfig):
+    """builderconfig for covidfact"""
 
     def __init__(self, **kwargs):
         """
@@ -35,19 +38,19 @@ class ScifactEntailmentConfig(datasets.BuilderConfig):
         Args:
           **kwargs: keyword arguments forwarded to super.
         """
-        super(
+        super(CovidFactEntailmentConfig, self).__init__(
             version=datasets.Version("1.0.0", ""), **kwargs
         )
 
 
-class
-    """TODO(
+class CovidFactEntailment(datasets.GeneratorBasedBuilder):
+    """TODO(covidfact): Short description of my dataset."""
 
-    # TODO(
+    # TODO(covidfact): Set up version.
     VERSION = datasets.Version("0.1.0")
 
     def _info(self):
-        # TODO(
+        # TODO(covidfact): Specifies the datasets.DatasetInfo object
 
         features = {
             "claim_id": datasets.Value("int32"),
@@ -92,13 +95,14 @@ class ScifactEntailment(datasets.GeneratorBasedBuilder):
         # download and extract URLs
         archive = dl_manager.download(_URL)
         for path, f in dl_manager.iter_archive(archive):
-
+            # The claims are too similar to paper titles; don't include.
+            if path == "data/covidfact/corpus_without_titles.jsonl":
                 corpus = self._read_tar_file(f)
                 corpus = {x["doc_id"]: x for x in corpus}
-            elif path == "data/claims_train.jsonl":
+            elif path == "data/covidfact/claims_train.jsonl":
                 claims_train = self._read_tar_file(f)
-            elif path == "data/
-
+            elif path == "data/covidfact/claims_test.jsonl":
+                claims_test = self._read_tar_file(f)
 
         return [
             datasets.SplitGenerator(
@@ -111,12 +115,12 @@ class ScifactEntailment(datasets.GeneratorBasedBuilder):
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.
+                name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "claims":
+                    "claims": claims_test,
                     "corpus": corpus,
-                    "split": "
+                    "split": "test",
                 },
             ),
         ]
@@ -127,7 +131,7 @@ class ScifactEntailment(datasets.GeneratorBasedBuilder):
         id_ = -1  # Will increment to 0 on first iteration.
         for claim in claims:
             evidence = {int(k): v for k, v in claim["evidence"].items()}
-            for cited_doc_id in claim["
+            for cited_doc_id in claim["doc_ids"]:
                 cited_doc = corpus[cited_doc_id]
                 abstract_sents = [sent.strip() for sent in cited_doc["abstract"]]
 
```
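The final hunk is easier to read with a concrete record in hand. Below is a hedged sketch of the claim shape `_generate_examples` appears to consume; only the `evidence` and `doc_ids` keys are confirmed by this diff, while the claim text and the `"sentences"` rationale field are illustrative assumptions.

```python
# Hedged sketch of one claim record as _generate_examples appears to consume
# it. Only "evidence" and "doc_ids" are confirmed by the diff; the claim text
# and the "sentences" rationale field are illustrative assumptions.
claim = {
    "claim": "Example claim about COVID-19.",
    "doc_ids": [42],
    "evidence": {"42": [{"sentences": [0, 2]}]},  # JSON object keys are strings
}

# As in the script: cast the string keys to ints so they match doc_ids.
evidence = {int(k): v for k, v in claim["evidence"].items()}

for cited_doc_id in claim["doc_ids"]:
    # The script's flatten() helper would merge per-annotation sentence lists.
    sentence_lists = [x["sentences"] for x in evidence.get(cited_doc_id, [])]
    print(cited_doc_id, sentence_lists)  # -> 42 [[0, 2]]
```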