# TempoSum / TempoSum.py
"""The TempoSum benchmark."""
import os
from contextlib import ExitStack

import datasets
_CITATION = """
@misc{cheang2023temposum,
Author = {Chi Seng Cheang and Hou Pong Chan and Derek F. Wong and Xuebo Liu and Zhaocong Li and Yanming Sun and Shudong Liu and Lidia S. Chao},
Title = {TempoSum: Evaluating the Temporal Generalization of Abstractive Summarization},
Year = {2023},
}
"""
_DESCRIPTION = """TempoSum: Evaluating the Temporal Generalization of Abstractive Summarization"""
_URL = "https://huggingface.co/datasets/chiseng-cheang/TempoSum/resolve/main/data/"
_DOCUMENT = "document"
_SUMMARY = "summary"
_TITLE = "title"
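
# Each TempoSum subset is served as a single test-split archive on the Hub,
# together with the list of plain-text features it provides.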
_DATASET_CONFIGS = {
"BBC_in-distribution": {
"urls": {
datasets.Split.TEST: os.path.join(_URL, "bbc_in_distribution.tar.gz"),
},
"available_features": [_DOCUMENT, _SUMMARY],
},
"BBC_future": {
"urls": {
datasets.Split.TEST: os.path.join(_URL, "bbc_future.tar.gz"),
},
"available_features": [_DOCUMENT, _SUMMARY],
},
"CNN_in-distribution": {
"urls": {
datasets.Split.TEST: os.path.join(_URL, "cnn_in_distribution.tar.gz"),
},
"available_features": [_DOCUMENT, _SUMMARY],
},
"CNN_future": {
"urls": {
datasets.Split.TEST: os.path.join(_URL, "cnn_future.tar.gz"),
},
"available_features": [_DOCUMENT, _SUMMARY],
},
}
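
# Every subset above gets its own BuilderConfig, which records the subset's
# download URLs and its feature schema (all features are plain-text columns).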
class TempoSumConfig(datasets.BuilderConfig):
    """BuilderConfig for TempoSum."""

    def __init__(self, urls, available_features, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = datasets.Features({
            feature: datasets.Value("string") for feature in available_features
        })
        self.urls = urls
        self.available_features = available_features

class TempoSum(datasets.GeneratorBasedBuilder):
    """The TempoSum benchmark."""

    BUILDER_CONFIGS = []
    for datasplit_name, datasplit_config in _DATASET_CONFIGS.items():
        BUILDER_CONFIGS.append(
            TempoSumConfig(
                name=datasplit_name,
                urls=datasplit_config['urls'],
                available_features=datasplit_config['available_features'],
            )
        )
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            citation=_CITATION,
            homepage="https://github.com/AndyCheang/TempoSum",
        )
    def _split_generators(self, dl_manager):
        # Download and extract one archive per split declared in the config
        # (every TempoSum subset only ships a test split).
        dl_dirs = dl_manager.download_and_extract(self.config.urls)
        splits = []
        for split, data_dir in dl_dirs.items():
            splits.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        'data_file': data_dir,
                        'split': split,
                    },
                )
            )
        return splits
    def _generate_examples(self, data_file, split):
        features = self.config.available_features
        # Each feature is stored in its own plain-text file named after the
        # feature (e.g. "document", "summary"), with one example per line;
        # line i of every file belongs to example i.
        with ExitStack() as stack:
            files = [
                stack.enter_context(open(os.path.join(data_file, feature), encoding="utf-8"))
                for feature in features
            ]
            for idx, sample_data in enumerate(zip(*files)):
                yield idx, {
                    feature: feature_data
                    for feature, feature_data in zip(features, sample_data)
                }
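
# Example usage (a sketch, assuming this script is hosted on the Hub under
# "chiseng-cheang/TempoSum", as _URL above suggests; config names and the
# "test" split come from _DATASET_CONFIGS):
#
#     from datasets import load_dataset
#
#     bbc_future = load_dataset("chiseng-cheang/TempoSum", "BBC_future", split="test")
#     print(bbc_future[0]["document"])
#     print(bbc_future[0]["summary"])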