Commit 58fdf71
Parent(s): 013210d

Delete Python files

Files changed:
- Fin-Fact.py +0 -79
- anli.py +0 -90
- bart_eval.py +0 -95
- bart_explaination_gen.py +0 -61
- bert_gen.py +0 -83
- data_crawler.py +0 -76
- data_generator.py +0 -171
- gpt2_nli.py +0 -70
- pegasus_gen.py +0 -70
- xl_sum_gen.py +0 -80
Fin-Fact.py
DELETED
@@ -1,79 +0,0 @@
"""Fin-Fact dataset."""

import json
import datasets

_CITATION = """\
@misc{rangapur2023finfact,
      title={Fin-Fact: A Benchmark Dataset for Multimodal Financial Fact Checking and Explanation Generation},
      author={Aman Rangapur and Haoran Wang and Kai Shu},
      year={2023},
      eprint={2309.08793},
      archivePrefix={arXiv},
      primaryClass={cs.AI}
}
"""

_DESCRIPTION = """\
Fin-Fact is a comprehensive dataset designed specifically for financial fact-checking and explanation generation.
The dataset consists of 3121 claims spanning multiple financial sectors.
"""

_HOMEPAGE = "https://github.com/IIT-DM/Fin-Fact"
_LICENSE = "Apache 2.0"
_URL = "https://huggingface.co/datasets/amanrangapur/Fin-Fact/resolve/main/finfact.json"

class FinFact(datasets.GeneratorBasedBuilder):
    """Fin-Fact dataset for financial fact-checking and text generation."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="generation",
            version=VERSION,
            description="The Fin-Fact dataset for financial fact-checking and text generation",
        ),
    ]

    DEFAULT_CONFIG_NAME = "generation"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "url": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                    "author": datasets.Value("string"),
                    "posted": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_file,
                },
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for id_, row in enumerate(data):
                yield id_, {
                    "url": row.get("url", ""),
                    "claim": row.get("claim", ""),
                    "author": row.get("author", ""),
                    "posted": row.get("posted", ""),
                    "label": row.get("label", ""),
                }
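For context, the deleted Fin-Fact.py was a Hugging Face datasets loading script. A minimal usage sketch, assuming the amanrangapur/Fin-Fact dataset repository (or a local copy of this script) is still reachable, might look like:

# Minimal sketch of how the loading script above was typically consumed.
# The repository id and split name are assumptions, not part of this commit.
from datasets import load_dataset

finfact = load_dataset("amanrangapur/Fin-Fact", name="generation", split="train")
print(finfact[0]["claim"], finfact[0]["label"])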
anli.py
DELETED
@@ -1,90 +0,0 @@
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import argparse
import json
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, classification_report, f1_score

class FactCheckerApp:
    def __init__(self, hg_model_hub_name='ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli'):
        # hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
        # hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"

        self.max_length = 248
        self.tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
        self.sentences_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def validate_claims(self, threshold=0.5):
        for title, evidence in zip(self.titles_list, self.sentences_list):
            tokenized_input_seq_pair = self.tokenizer.encode_plus(evidence, title,
                                                                  max_length=self.max_length,
                                                                  return_token_type_ids=True, truncation=True)
            input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
            token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
            attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
            outputs = self.model(input_ids,
                                 attention_mask=attention_mask,
                                 labels=None)
            predicted_probability = torch.softmax(outputs.logits, dim=1)[0].tolist()
            entailment_prob = predicted_probability[0]
            neutral_prob = predicted_probability[1]
            contradiction_prob = predicted_probability[2]

            if entailment_prob > threshold:
                is_claim_true = "true"
            elif neutral_prob > threshold:
                is_claim_true = "neutral"
            else:
                is_claim_true = "false"

            print(is_claim_true)
            self.claim_list.append(is_claim_true)

    def calculate_metrics(self):
        precision = precision_score(self.labels_list, self.claim_list, average='macro')
        accuracy = accuracy_score(self.labels_list, self.claim_list)
        f1_scoree = f1_score(self.labels_list, self.claim_list, average='macro')
        conf_matrix = confusion_matrix(self.labels_list, self.claim_list)
        recall_metric = recall_score(self.labels_list, self.claim_list, pos_label="true", average="macro")
        cls_report = classification_report(self.labels_list, self.claim_list, labels=["true", "false", "neutral"])
        return precision, accuracy, f1_scoree, conf_matrix, recall_metric, cls_report

def parse_args():
    parser = argparse.ArgumentParser(description="Fact Checker Application")
    parser.add_argument("--model_name", default="ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli", help="Name of the pre-trained model to use")
    parser.add_argument("--data_file", required=True, help="Path to the JSON data file")
    parser.add_argument("--threshold", type=float, default=0.5, help="Threshold for claim validation")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    fact_checker_app = FactCheckerApp(hg_model_hub_name=args.model_name)
    fact_checker_app.load_data(args.data_file)
    fact_checker_app.preprocess_data()
    fact_checker_app.validate_claims(threshold=args.threshold)
    precision, accuracy, f1_scoree, conf_matrix, recall_metric, cls_report = fact_checker_app.calculate_metrics()
    print("Precision:", precision)
    print("Accuracy:", accuracy)
    print("F1 score:", f1_scoree)
    print("Recall: ", recall_metric)
    print("Confusion Matrix:\n", conf_matrix)
    print("Report:\n", cls_report)
bart_eval.py
DELETED
@@ -1,95 +0,0 @@
import json, itertools, pyter
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu


class NLPFactGenerator:
    def __init__(self):
        self.gen_fact_list = []
        self.evidence_list = []

    def _split_into_words(self, sentences):
        return list(itertools.chain(*[_.split(" ") for _ in sentences]))

    def _get_word_ngrams(self, n, sentences):
        assert len(sentences) > 0
        assert n > 0
        words = self._split_into_words(sentences)
        return self._get_ngrams(n, words)

    def _get_ngrams(self, n, text):
        ngram_set = set()
        text_length = len(text)
        max_index_ngram_start = text_length - n
        for i in range(max_index_ngram_start + 1):
            ngram_set.add(tuple(text[i:i + n]))
        return ngram_set

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def get_title_evidence_generated_facts(self):
        titles = []
        evidences = []
        generated_facts = []

        for entry in self.data:
            titles.append(entry["title"])
            evidences.append(entry["evidence"])
            generated_facts.append(entry["generated_fact"])

        return evidences, generated_facts

    def ter(self):
        ref, gen = self.get_title_evidence_generated_facts()
        if len(ref) == 1:
            total_score = pyter.ter(gen[0].split(), ref[0].split())
        else:
            total_score = 0
            for i in range(len(gen)):
                total_score = total_score + pyter.ter(gen[i].split(), ref[i].split())
            total_score = total_score/len(gen)
        return total_score

    def bleu(self):
        evidence_list, gen_fact_list = self.get_title_evidence_generated_facts()
        ref_bleu = []
        gen_bleu = []
        for l in evidence_list:
            gen_bleu.append(l.split())
        for i,l in enumerate(gen_fact_list):
            ref_bleu.append([l.split()])
        cc = SmoothingFunction()
        score_bleu = corpus_bleu(ref_bleu, gen_bleu, weights=(0, 1, 0, 0), smoothing_function=cc.method4)
        return score_bleu

    def rouge_one(self, n=3):
        evidence_list, gen_fact_list = self.get_title_evidence_generated_facts()
        evaluated_ngrams = self._get_word_ngrams(n, evidence_list)
        reference_ngrams = self._get_word_ngrams(n, gen_fact_list)
        reference_count = len(reference_ngrams)
        evaluated_count = len(evaluated_ngrams)
        overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
        overlapping_count = len(overlapping_ngrams)
        if evaluated_count == 0:
            precision = 0.0
        else:
            precision = overlapping_count / evaluated_count

        if reference_count == 0:
            recall = 0.0
        else:
            recall = overlapping_count / reference_count

        f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
        return recall


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("generated_facts_xlsum.json")
    rouge_one_score = fact_generator.rouge_one()
    blue_score = fact_generator.bleu()
    print(blue_score)
    print(rouge_one_score)
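Note that the bleu() method above passes weights=(0, 1, 0, 0), so corpus_bleu scores 2-gram overlap only. A toy illustration of the same nltk call, with invented token lists:

# Toy illustration of the corpus_bleu call used above; weights=(0, 1, 0, 0)
# means only bigram precision contributes. Example tokens are made up.
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu

references = [[["profits", "rose", "sharply", "in", "q3"]]]  # one list of references per hypothesis
hypotheses = [["profits", "rose", "in", "q3"]]
cc = SmoothingFunction()
print(corpus_bleu(references, hypotheses, weights=(0, 1, 0, 0), smoothing_function=cc.method4))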
bart_explaination_gen.py
DELETED
@@ -1,61 +0,0 @@
from transformers import BartTokenizer, BartForConditionalGeneration
import json

class NLPFactGenerator:
    def __init__(self, model_name="facebook/bart-large-cnn"):
        self.max_length = 1024
        self.model = BartForConditionalGeneration.from_pretrained(model_name)
        self.tokenizer = BartTokenizer.from_pretrained(model_name)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer.encode(evidence, return_tensors="pt")
            try:
                generated_ids = self.model.generate(input_ids, max_length=self.max_length, num_return_sequences=1)
                generated_text = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
                print('Done')
                print('*'*50)
                generated_facts.append(generated_text)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)
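Each generation script in this commit (bart_explaination_gen.py here, plus bert_gen.py, pegasus_gen.py, and xl_sum_gen.py below) writes records with the same three fields, which bart_eval.py above reads back for BLEU/ROUGE/TER scoring. A sketch of one such record, with invented placeholder values:

# Shape of one record written to the generated_facts*.json files and consumed by
# bart_eval.py. Field values here are placeholders, not real data.
record = {
    "title": "Claim text as scraped from the fact-check page",
    "evidence": "Concatenated evidence sentences for that claim",
    "generated_fact": "Model-generated explanation or summary",
}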
bert_gen.py
DELETED
@@ -1,83 +0,0 @@
import torch
from transformers import BertTokenizerFast, EncoderDecoderModel
import json

class NLPFactGenerator:
    def __init__(self, ckpt="mrm8488/bert2bert_shared-german-finetuned-summarization"):
        self.max_length = 1024
        self.tokenizer = BertTokenizerFast.from_pretrained(ckpt)
        self.model = EncoderDecoderModel.from_pretrained(ckpt)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            inputs = self.tokenizer([evidence], padding="max_length", truncation=True, max_length=1024, return_tensors="pt")
            input_ids = inputs.input_ids
            attention_mask = inputs.attention_mask
            try:
                output = self.model.generate(input_ids, attention_mask=attention_mask)
                summary = self.tokenizer.decode(output[0], skip_special_tokens=True)
                count+=1
                print(count)
                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_bert.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)


device = 'cuda' if torch.cuda.is_available() else 'cpu'
ckpt = 'mrm8488/bert2bert_shared-german-finetuned-summarization'
tokenizer = BertTokenizerFast.from_pretrained(ckpt)
model = EncoderDecoderModel.from_pretrained(ckpt).to(device)
def generate_summary(text):
    inputs = tokenizer([text], padding="max_length", truncation=True, max_length=512, return_tensors="pt")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    output = model.generate(input_ids, attention_mask=attention_mask)
    return tokenizer.decode(output[0], skip_special_tokens=True)

text = "Your text here..."

generate_summary(text)
data_crawler.py
DELETED
@@ -1,76 +0,0 @@
from bs4 import BeautifulSoup
import pandas as pd
import requests
import json

class FactCheckerScraper:
    def __init__(self):
        self.authors = []
        self.statements = []
        self.sources = []
        self.targets = []
        self.href_list = []

    def scrape_website(self, page_number, source):
        page_num = str(page_number)
        URL = 'https://www.politifact.com/factchecks/list/?category=income&page={}&source={}'.format(page_num, source)
        webpage = requests.get(URL)
        soup = BeautifulSoup(webpage.text, "html.parser")
        statement_footer = soup.find_all('footer', attrs={'class':'m-statement__footer'})
        statement_quote = soup.find_all('div', attrs={'class':'m-statement__quote'})
        statement_meta = soup.find_all('div', attrs={'class':'m-statement__meta'})
        target = soup.find_all('div', attrs={'class':'m-statement__meter'})

        for i in statement_footer:
            link1 = i.text.strip()
            name_and_date = link1.split()
            first_name = name_and_date[1]
            last_name = name_and_date[2]
            full_name = first_name+' '+last_name
            self.authors.append(full_name)

        for i in statement_quote:
            link2 = i.find_all('a')
            self.statements.append(link2[0].text.strip())

        for i in statement_meta:
            link3 = i.find_all('a')
            source_text = link3[0].text.strip()
            self.sources.append(source_text)

        for i in target:
            fact = i.find('div', attrs={'class':'c-image'}).find('img').get('alt')
            self.targets.append(fact)

        for i in statement_quote:
            href = i.find('a')['href']
            href = 'https://www.politifact.com' + href
            self.href_list.append(href)

    def scrape_multiple_pages(self, num_pages, source):
        for i in range(1, num_pages):
            self.scrape_website(i, source)

    def create_dataframe(self):
        data = pd.DataFrame(columns=['author', 'statement', 'links', 'source', 'date', 'target'])
        data['author'] = self.authors
        data['statement'] = self.statements
        data['links'] = self.href_list
        data['source'] = self.sources
        data['target'] = self.targets
        return data

    def save_to_json(self, filename):
        data_json = {
            "url": self.href_list,
            "label": self.targets
        }

        with open(filename, "w") as outfile:
            json.dump(data_json, outfile)

if __name__ == "__main__":
    scraper = FactCheckerScraper()
    scraper.scrape_multiple_pages(70, source='covid')
    data = scraper.create_dataframe()
    scraper.save_to_json("./income.json")
data_generator.py
DELETED
@@ -1,171 +0,0 @@
from bs4 import BeautifulSoup
import datetime
import requests
import nltk
import json

class WebScraper:
    def __init__(self, url):
        self.URL = url
        try:
            self.webpage = requests.get(self.URL)
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
        if self.webpage:
            try:
                self.soup = BeautifulSoup(self.webpage.text, "html.parser")
            except:
                print("Error: Failed to create BeautifulSoup object.")

    def remove_unicode(self, string):
        return string.encode('ascii', 'ignore').decode('utf-8')

    def get_page_title(self):
        try:
            div_element = self.soup.find('div', class_='m-statement__quote')
            title = div_element.get_text(strip=True)
            cleaned_title = self.remove_unicode(title)
            cleaned_title = cleaned_title.replace('\"', '')
        except AttributeError:
            return None  # Error: Failed to get page title.
        return cleaned_title

    def get_page_author(self):
        try:
            author_element = self.soup.find('div', class_='m-author__content').find('a')
            author = author_element.get_text(strip=True)
        except AttributeError:
            return None  # Error: Failed to get page author.
        return author

    def get_page_posted_date(self):
        date_element = None
        try:
            date_element = self.soup.find('span', class_='m-author__date')
            date = date_element.get_text(strip=True)
            date_obj = datetime.datetime.strptime(date, "%B %d, %Y")
            formatted_date = date_obj.strftime("%m/%d/%Y")
        except (AttributeError, ValueError):
            return None  # Error: Failed to get page posted date.
        return formatted_date

    def get_sci_check_digest(self):
        try:
            div_element = self.soup.find('div', class_='short-on-time')
            li_tags = div_element.find_all('li') if div_element else []
            sci_digest_list = [li.get_text(strip=True) for li in li_tags]
            final_sci_digest = ", ".join(sci_digest_list)
            cleaned_sci_digest = self.remove_unicode(final_sci_digest)
            cleaned_sci_digest = cleaned_sci_digest.replace('\"', '')
            tokenised_sci_digest = nltk.sent_tokenize(cleaned_sci_digest)
        except AttributeError:
            return None  # Error: Failed to get SciCheck digest.
        return tokenised_sci_digest

    def get_paragraph_list(self):
        try:
            paragraph_list = []
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            text_list = [p.get_text(strip=True) for p in p_elements]
            for text in text_list:
                paragraph_list.append(text)
            final_paragraphs = " ".join(paragraph_list)
            cleaned_paragraphs = final_paragraphs.replace('\u00a0', ' ')
            cleaned_paragraphs = self.remove_unicode(cleaned_paragraphs)
            cleaned_paragraphs = cleaned_paragraphs.replace('\"', '')
            tokenized_paragraphs = nltk.sent_tokenize(cleaned_paragraphs)
        except AttributeError:
            return None, None  # Error: Failed to get paragraphs.
        return paragraph_list, tokenized_paragraphs

    def get_sentences_citations(self):
        try:
            p_elements = self.soup.select('article.m-textblock p')
            citation_list = []
            for p in p_elements:
                href = p.find('a')
                if href and 'href' in href.attrs:
                    href_text = href['href']
                    sentence = p.get_text(strip=True)
                    cleaned_sentence = sentence.replace('\u00a0', ' ')
                    cleaned_sentence = self.remove_unicode(cleaned_sentence)
                    cleaned_sentence = cleaned_sentence.replace('\"', '')
                    citation_list.append({"sentence": cleaned_sentence, "hrefs": href_text})
        except AttributeError:
            return None  # Error: Failed to get citation list.
        return citation_list

    def get_issue_list(self):
        issue_list = []
        try:
            ul_element = self.soup.find('ul', class_='m-list--horizontal')
            li_elements = ul_element.find_all('li', class_='m-list__item')
            for li in li_elements[:-1]:
                category = li.a['title']
                issue_list.append(category)
        except AttributeError:
            return None  # Error: Failed to get issue list.
        return issue_list

    def get_image_info(self):
        try:
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            em_elements = article_element.find_all('em')
            img_count = 0
            image_captions = []
            for p in p_elements:
                img_tag = p.find('img')
                if img_tag:
                    img_src = img_tag['src']
                    if img_src:
                        img_count += 1
                        if img_count <= len(em_elements):
                            image_caption = em_elements[img_count - 1].get_text(strip=True)
                            cleaned_captions = image_caption.replace('\u00a0', ' ')
                            cleaned_captions = self.remove_unicode(cleaned_captions)
                            cleaned_captions = cleaned_captions.replace('\"', '')
                            image_captions.append({"image_src": img_src, "image_caption": cleaned_captions})
        except:
            return None
        return image_captions

    def get_label(self):
        try:
            target = self.soup.find_all('div', attrs={'class':'m-statement__meter'})
            for i in target:
                label = i.find('div', attrs={'class':'c-image'}).find('img').get('alt')
                # if label == 'pants-fire':
                #     label = 'false'
                # elif label == 'mostly-true':
                #     label = 'true'
        except:
            return None
        return label

with open("./income.json", "r") as infile:
    data = json.load(infile)
    urls = data["url"]
    labels = data["label"]

scraped_data = []
for url, label in zip(urls,labels):
    print(url)
    scraper = WebScraper(url)
    data = {
        "url": url,
        "title": scraper.get_page_title(),
        "author": scraper.get_page_author(),
        "posted": scraper.get_page_posted_date(),
        "sci_digest": scraper.get_sci_check_digest(),
        "paragraphs": scraper.get_paragraph_list()[1],
        "issues": scraper.get_issue_list(),
        "image_data": scraper.get_image_info(),
        "data": scraper.get_sentences_citations(),
        "label": label
    }
    scraped_data.append(data)

with open("./json_new/income.json", "w") as outfile:
    json.dump(scraped_data, outfile)
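This script consumes the ./income.json file produced by data_crawler.py above: two parallel lists of PolitiFact URLs and their rating labels. A sketch of that hand-off file, with invented placeholder entries:

# Shape of ./income.json as written by FactCheckerScraper.save_to_json() and read
# by the loop above. The URL and label strings are placeholders, not real data.
handoff = {
    "url": ["https://www.politifact.com/factchecks/...", "https://www.politifact.com/factchecks/..."],
    "label": ["false", "half-true"],
}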
gpt2_nli.py
DELETED
@@ -1,70 +0,0 @@
from transformers import (
    GPT2LMHeadModel,
    GPT2Tokenizer,
)
import argparse
import warnings
warnings.filterwarnings("ignore")

from fact_checking import FactChecker
import json
from sklearn.metrics import confusion_matrix, classification_report

class FactCheckerApp:
    def __init__(self, model_name='fractalego/fact-checking'):
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.fact_checking_model = GPT2LMHeadModel.from_pretrained(model_name)
        self.fact_checker = FactChecker(self.fact_checking_model, self.tokenizer)
        self.sentences_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def validate_claims(self):
        max_seq_length = 1024
        for title, evidence in zip(self.titles_list, self.sentences_list):
            try:
                if len(title) > max_seq_length:
                    title = title[:max_seq_length]
                if len(evidence) > max_seq_length:
                    evidence = evidence[:max_seq_length]
                print(len(evidence))
                is_claim_true = self.fact_checker.validate(evidence, title)
                print(is_claim_true)
                self.claim_list.append(is_claim_true)
            except IndexError:
                self.claim_list.append(None)

    def calculate_metrics(self):
        conf_matrix = confusion_matrix(self.labels_list, [str(is_claim).lower() for is_claim in self.claim_list])
        cls_report = classification_report(self.labels_list, [str(is_claim).lower() for is_claim in self.claim_list], labels=["true", "false", "neutral"])

        return conf_matrix, cls_report

def parse_args():
    parser = argparse.ArgumentParser(description="Fact Checker Application")
    parser.add_argument("--model_name", default="fractalego/fact-checking", help="Name of the fact-checking model to use")
    parser.add_argument("--data_file", required=True, help="Path to the JSON data file")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    fact_checker_app = FactCheckerApp(model_name=args.model_name)
    fact_checker_app.load_data(args.data_file)
    fact_checker_app.preprocess_data()
    fact_checker_app.validate_claims()
    conf_matrix, cls_report = fact_checker_app.calculate_metrics()
    print("Confusion Matrix:\n", conf_matrix)
    print("Report:\n", cls_report)
pegasus_gen.py
DELETED
@@ -1,70 +0,0 @@
from transformers import PegasusTokenizer, PegasusForConditionalGeneration
import json

class NLPFactGenerator:
    def __init__(self, model_name="human-centered-summarization/financial-summarization-pegasus"):
        self.max_length = 1024
        self.tokenizer = PegasusTokenizer.from_pretrained(model_name)
        self.model = PegasusForConditionalGeneration.from_pretrained(model_name)
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer(evidence, return_tensors="pt").input_ids
            try:
                output = self.model.generate(
                    input_ids,
                    max_length=64,
                    num_beams=5,
                    early_stopping=True
                )
                summary = self.tokenizer.decode(output[0], skip_special_tokens=True)
                count+=1
                print(count)

                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_pegasus.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)
xl_sum_gen.py
DELETED
@@ -1,80 +0,0 @@
import re
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import json

class NLPFactGenerator:
    def __init__(self, model_name="csebuetnlp/mT5_multilingual_XLSum"):
        self.max_length = 1024
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        self.WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))
        self.sentences_list = []
        self.justification_list = []
        self.titles_list = []
        self.labels_list = []
        self.claim_list = []

    def load_data(self, filename):
        with open(filename, "r") as infile:
            self.data = json.load(infile)

    def preprocess_data(self):
        max_seq_length = 1024
        for entry in self.data:
            if "data" in entry:
                self.titles_list.append(entry["title"])
                justification = ' '.join(entry["paragraphs"])
                for evidence in self.sentences_list:
                    if len(evidence) > max_seq_length:
                        evidence = evidence[:max_seq_length]
                _evidence = ' '.join([item["sentence"] for item in entry["data"]])
                self.justification_list.append(justification)
                self.sentences_list.append(_evidence)
                self.labels_list.append(entry["label"])

    def generate_fact(self):
        max_seq_length = 1024
        generated_facts = []
        count = 0
        for evidence in self.justification_list:
            if len(evidence) > max_seq_length:
                evidence = evidence[:max_seq_length]
            input_ids = self.tokenizer(
                [self.WHITESPACE_HANDLER(evidence)],
                return_tensors="pt",
                padding="max_length",
                truncation=True,
                max_length=1024)["input_ids"]
            try:
                output_ids = self.model.generate(
                    input_ids=input_ids,
                    max_length=128,
                    no_repeat_ngram_size=2,
                    num_beams=4)[0]
                summary = self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=False)
                count+=1
                print(count)
                generated_facts.append(summary)
            except:
                print('Input ID: ', len(input_ids))
        return generated_facts


if __name__ == "__main__":
    fact_generator = NLPFactGenerator()
    fact_generator.load_data("finfact_old.json")
    fact_generator.preprocess_data()
    generated_facts = fact_generator.generate_fact()
    generated_data = []

    for title, evi, fact in zip(fact_generator.titles_list, fact_generator.sentences_list, generated_facts):
        generated_data.append({"title": title, "evidence":evi, "generated_fact": fact})
    with open("generated_facts_xlsum.json", "w") as outfile:
        json.dump(generated_data, outfile, indent=4)