"""The TempoSum benchmark."""

import os
from contextlib import ExitStack

import datasets

_CITATION = """
@misc{cheang2023temposum,
Author = {Chi Seng Cheang and Hou Pong Chan and Derek F. Wong and Xuebo Liu and Zhaocong Li and Yanming Sun and Shudong Liu and Lidia S. Chao},
Title = {TempoSum: Evaluating the Temporal Generalization of Abstractive Summarization},
Year = {2023},
}
"""

_DESCRIPTION = """TempoSum: Evaluating the Temporal Generalization of Abstractive Summarization"""

_URL = "https://huggingface.co/datasets/chiseng-cheang/TempoSum/resolve/main/data/"

_DOCUMENT = "document"
_SUMMARY = "summary"
_TITLE = "title"

_DATASET_CONFIGS = {
    "BBC_in-distribution": {
        "urls": {
            # Plain string concatenation: os.path.join would insert a
            # backslash separator on Windows and break the URL.
            datasets.Split.TEST: _URL + "bbc_in_distribution.tar.gz",
        },
        "available_features": [_DOCUMENT, _SUMMARY],
    },
    "BBC_future": {
        "urls": {
            datasets.Split.TEST: _URL + "bbc_future.tar.gz",
        },
        "available_features": [_DOCUMENT, _SUMMARY],
    },
    "CNN_in-distribution": {
        "urls": {
            datasets.Split.TEST: _URL + "cnn_in_distribution.tar.gz",
        },
        "available_features": [_DOCUMENT, _TITLE],
    },
    "CNN_future": {
        "urls": {
            datasets.Split.TEST: _URL + "cnn_future.tar.gz",
        },
        "available_features": [_DOCUMENT, _TITLE],
    },
}


class TempoSumConfig(datasets.BuilderConfig):
    """BuilderConfig for TempoSum.

    Args:
        urls: mapping from split to the archive URL for that split.
        available_features: names of the string features present in this
            configuration (e.g. "document" and "summary").
    """

    def __init__(self, urls, available_features, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # Every available feature is stored as a plain string column.
        self.features = datasets.Features(
            {feature: datasets.Value("string") for feature in available_features}
        )
        self.urls = urls
        self.available_features = available_features

class TempoSum(datasets.GeneratorBasedBuilder):
    """The TempoSum benchmark."""

    # One builder config per dataset variant defined above.
    BUILDER_CONFIGS = [
        TempoSumConfig(
            name=config_name,
            urls=config["urls"],
            available_features=config["available_features"],
        )
        for config_name, config in _DATASET_CONFIGS.items()
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Features vary per configuration: BBC pairs documents with
            # summaries, CNN pairs them with titles.
            features=self.config.features,
            homepage="https://github.com/AndyCheang/TempoSum",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract one archive per split listed in the config.
        dl_dirs = dl_manager.download_and_extract(self.config.urls)
        return [
            datasets.SplitGenerator(
                name=str(split),
                gen_kwargs={
                    "data_file": dl_dir,
                    "split": split,
                },
            )
            for split, dl_dir in dl_dirs.items()
        ]

    def _generate_examples(self, data_file, split):
        # Each feature lives in its own line-aligned plain-text file inside
        # the extracted archive: the i-th line of every file belongs to
        # example i. ExitStack keeps one open handle per feature file.
        features = self.config.available_features
        with ExitStack() as stack:
            files = [
                stack.enter_context(
                    open(os.path.join(data_file, feature), encoding="utf-8")
                )
                for feature in features
            ]
            for idx, sample_data in enumerate(zip(*files)):
                yield idx, {
                    # Drop the trailing newline that terminates each line.
                    feature: feature_data.rstrip("\n")
                    for feature, feature_data in zip(features, sample_data)
                }
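
if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper. It assumes
    # network access to the Hugging Face Hub and a `datasets` version that
    # still supports loading from a local script (newer releases may require
    # passing trust_remote_code=True to load_dataset).
    dataset = datasets.load_dataset(__file__, "BBC_future")
    print(dataset["test"][0])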