Commit c189f560
Parent(s):
Update files from the datasets library (from 1.2.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- .gitattributes +27 -0
- README.md +180 -0
- dataset_infos.json +1 -0
- dummy/thaisum/1.0.0/dummy_data.zip +3 -0
- thaisum.py +104 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,180 @@
---
annotations_creators:
- no-annotation
language_creators:
- found
languages:
- th
licenses:
- mit
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- conditional-text-generation
- sequence-modeling
task_ids:
- language-modeling
- summarization
---

# Dataset Card for `thaisum`

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** https://github.com/nakhunchumpolsathien/ThaiSum
- **Repository:** https://github.com/nakhunchumpolsathien/ThaiSum
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** https://github.com/nakhunchumpolsathien

### Dataset Summary

ThaiSum is a large-scale corpus for Thai text summarization obtained from several online news websites, namely Thairath, ThaiPBS, Prachathai, and The Standard. The dataset consists of over 350,000 article-summary pairs written by journalists.

### Supported Tasks and Leaderboards

summarization, language modeling

### Languages

Thai

## Dataset Structure

### Data Instances

```
{'body': 'กีเก ซานเชซ ฟลอเรส\xa0 กุนซือเลือดกระทิงของทีมวัตฟอร์ด\xa0 เมินประเด็นจุดโทษปัญหาในเกมพรีเมียร์ลีก อังกฤษ นัดที่แตนอาละวาดเปิดบ้านพ่าย คริสตัล พาเลซ 0-1ชี้ทีมของเขาเล่นไม่ดีพอเอง,สำนักข่าวต่างประเทศรายงานวันที่ 27 ก.ย. ว่า กีเก ซานเชซ ฟลอเรส\xa0 ผู้จัดการทีมชาวสเปน ของ แตนอาละวาด วัตฟอร์ด\xa0 ยอมรับทีมของเขาเล่นได้ไม่ดีพอเอง ในเกมพรีเมียร์ลีก อังกฤษ นัดเปิดบ้านพ่าย อินทรีผงาด คริสตัล พาเลซ 0-1 เมื่อคืนวันอาทิตย์ที่ผ่านมา,เกมนี้จุดเปลี่ยนมาอยู่ที่การได้จุดโทษในช่วงครึ่งหลังของ คริสตัล พาเลซ ซึ่งไม่ค่อยชัดเจนเท่าไหร่ว่า อัลลัน นียอม นั้นไปทำฟาล์วใส่ วิลฟรีด ซาฮา ในเขตโทษหรือไม่ แต่ผู้ตัดสินก็ชี้เป็นจุดโทษ ซึ่ง โยอัน กาบาย สังหารไม่พลาด และเป็นประตูชัยช่วยให้ คริสตัล พาเลซ เอาชนะ วัตฟอร์ด ไป 1-0 และเป็นการพ่ายแพ้ในบ้านนัดแรกของวัตฟอร์ดในฤดูกาลนี้อีกด้วย,ฟลอเรส กล่าวว่า มันเป็นเรื่องยากในการหยุดเกมรุกของคริสตัล พาเลซ ซึ่งมันอึดอัดจริงๆสำหรับเรา เราเล่นกันได้ไม่ดีนักในตอนที่ได้ครองบอล เราต้องเล่นทางริมเส้นให้มากกว่านี้ เราไม่สามารถหยุดเกมสวนกลับของพวกเขาได้ และแนวรับของเราก็ยืนไม่เป็นระเบียบสักเท่าไหร่ในช่วงครึ่งแรก ส่วนเรื่องจุดโทษการตัดสินใจขั้นสุดท้ายมันอยู่ที่ผู้ตัดสิน ซึ่งมันเป็นการตัดสินใจที่สำคัญ ผมเองก็ไม่รู้ว่าเขาตัดสินถูกหรือเปล่า บางทีมันอาจเป็นจุดที่ตัดสินเกมนี้เลย แต่เราไม่ได้แพ้เกมนี้เพราะจุดโทษ เราแพ้ในวันนี้เพราะเราเล่นไม่ดีและคริสตัล พาเลซ เล่นดีกว่าเรา เราไม่ได้มีฟอร์มการเล่นที่ดีในเกมนี้เลย', 'summary': 'กีเก ซานเชซ ฟลอเรส กุนซือเลือดกระทิงของทีมวัตฟอร์ด เมินประเด็นจุดโทษปัญหาในเกมพรีเมียร์ลีก อังกฤษ นัดที่แตนอาละวาดเปิดบ้านพ่าย คริสตัล พาเลซ 0-1ชี้ทีมของเขาเล่นไม่ดีพอเอง', 'tags': 'พรีเมียร์ลีก,วัตฟอร์ด,คริสตัล พาเลซ,กีเก ซานเชซ ฟลอเรส,ข่าวกีฬา,ข่าว,ไทยรัฐออนไลน์', 'title': 'ฟลอเรส รับ วัตฟอร์ดห่วยเองเกมพ่ายพาเลซคาบ้าน', 'type': '', 'url': 'https://www.thairath.co.th/content/528322'}
```

### Data Fields

- `title`: title of the article
- `body`: body of the article
- `summary`: summary of the article
- `type`: type of the article, if any
- `tags`: tags of the article, separated by `,`
- `url`: URL of the article

### Data Splits

train/valid/test: 358868 / 11000 / 11000

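For example, the splits can be loaded with the `datasets` library (a minimal usage sketch, not part of this repository; only the dataset identifier and field names come from this card):

```python
# Minimal sketch: load ThaiSum and inspect one record.
from datasets import load_dataset

thaisum = load_dataset("thaisum")
print({split: ds.num_rows for split, ds in thaisum.items()})
# {'train': 358868, 'validation': 11000, 'test': 11000}

example = thaisum["train"][0]
print(example["title"])            # headline written by the journalist
print(example["tags"].split(","))  # `tags` is a single comma-separated string
```
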
## Dataset Creation

### Curation Rationale

Sequence-to-sequence (Seq2Seq) models have shown great achievement in text summarization. However, Seq2Seq models often require large-scale training data to achieve effective results. Although many impressive advancements have been made in the text summarization field, most summarization studies focus on resource-rich languages, and the progress of Thai text summarization is still far behind. The dearth of large-scale datasets keeps Thai text summarization in its infancy. To the best of our knowledge, there was no large-scale dataset for Thai text summarization available anywhere. Thus, we present ThaiSum, a large-scale corpus for Thai text summarization obtained from several online news websites, namely Thairath, ThaiPBS, Prachathai, and The Standard.

### Source Data

#### Initial Data Collection and Normalization

We used a Python library named Scrapy to crawl articles from several news websites, namely Thairath, Prachatai, ThaiPBS, and The Standard. We first collected news URLs provided in their sitemaps. During web crawling, we used the HTML markup and metadata available in the HTML pages to identify article text, summary, headline, tags, and label. Collected articles were published online from 2014 to August 2020.

We further performed a data-cleansing process to minimize noisy data. We filtered out articles whose article text or summary was missing. Articles whose article text contained fewer than 150 words or whose summary contained fewer than 15 words were removed. We also discarded articles that contained at least one of the following tags: ‘ดวง’ (horoscope), ‘นิยาย’ (novel), ‘อินสตราแกรมดารา’ (celebrity Instagram), ‘คลิปสุดฮา’ (funny video) and ‘สรุปข่าว’ (highlight news). Some summaries were completely irrelevant to their original article texts. To eliminate those irrelevant summaries, we calculated an abstractedness score between each summary and its article text, written formally as:

$$\frac{|S - A|}{r} \times 100$$

where $S$ denotes the set of summary tokens, $A$ denotes the set of article tokens, and $r$ denotes the total number of summary tokens. We omitted articles with a 1-gram abstractedness score higher than 60%.

It is important to point out that we used [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp), version 2.2.4, tokenizing engine = newmm, to process Thai texts in this study. Tokenizing running Thai text into words or sentences is challenging because the Thai language has no clear word or sentence delimiters, so using a different tokenization engine may yield a different segmentation into words/sentences.

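As a concrete illustration, here is a minimal sketch of that abstractedness filter, assuming PyThaiNLP's `newmm` engine as stated above; the helper names and the 1-gram-only treatment are illustrative, not the original pipeline code:

```python
# Sketch of the abstractedness filter described above (1-grams only).
from pythainlp.tokenize import word_tokenize


def abstractedness(summary: str, article: str) -> float:
    """|S - A| / r * 100: % of unique summary tokens absent from the article."""
    summary_tokens = word_tokenize(summary, engine="newmm")  # r = len(summary_tokens)
    s = set(summary_tokens)                                  # S: summary token set
    a = set(word_tokenize(article, engine="newmm"))          # A: article token set
    return 100.0 * len(s - a) / len(summary_tokens)


def keep_pair(summary: str, article: str) -> bool:
    # Articles scoring above 60% at the 1-gram level were discarded.
    return abstractedness(summary, article) <= 60.0
```
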
After the data-cleansing process, the ThaiSum dataset contains over 358,000 articles. The size of this dataset is comparable to that of the well-known CNN/Daily Mail English document summarization dataset. Moreover, we analysed the characteristics of this dataset by measuring its abstractedness level, compression rate, and content diversity. For more details, see [thaisum_exploration.ipynb](https://github.com/nakhunchumpolsathien/ThaiSum/blob/master/thaisum_exploration.ipynb).

#### Dataset Statistics

The ThaiSum dataset consists of 358,868 articles. The average lengths of article texts and summaries are approximately 530 and 37 words, respectively. As mentioned earlier, we also collected the headlines, tags, and labels provided with each article. Tags are similar to keywords of the article. An article normally carries several tags but only a few labels; tags can be names of places or persons that the article is about, while labels indicate the news category (politics, entertainment, etc.). Ultimately, ThaiSum contains 538,059 unique tags and 59 unique labels. Note that not every article contains tags or labels.

| Statistic | Value | Unit |
|:---|---:|---:|
| Dataset Size | 358,868 | articles |
| Avg. Article Length | 529.5 | words |
| Avg. Summary Length | 37.3 | words |
| Avg. Headline Length | 12.6 | words |
| Unique Vocabulary Size | 407,355 | words |
| Words Occurring > 10 Times | 81,761 | words |
| Unique News Tags | 538,059 | tags |
| Unique News Labels | 59 | labels |

+
#### Who are the source language producers?
|
125 |
+
|
126 |
+
Journalists of respective articles
|
127 |
+
|
128 |
+
### Annotations
|
129 |
+
|
130 |
+
#### Annotation process
|
131 |
+
|
132 |
+
`summary`, `type` and `tags` are created by journalists who wrote the articles and/or their publishers.
|
133 |
+
|
134 |
+
#### Who are the annotators?
|
135 |
+
|
136 |
+
`summary`, `type` and `tags` are created by journalists who wrote the articles and/or their publishers.
|
137 |
+
|
138 |
+
### Personal and Sensitive Information
|
139 |
+
|
140 |
+
All data are public news articles. No personal and sensitive information is expected to be included.
|
141 |
+
|
## Considerations for Using the Data

### Social Impact of Dataset

- News summarization in Thai
- Language modeling for Thai news

### Discussion of Biases

- [ThaiPBS](https://www.thaipbs.or.th/home) [receives funding from the Thai government](https://www.bangkokbiznews.com/blog/detail/648740).
- [Thairath](https://www.thairath.co.th/) is known as [the most popular newspaper in Thailand](https://mgronline.com/onlinesection/detail/9620000058532); it has no clear political leaning.
- [The Standard](https://thestandard.co/) is a left-leaning online magazine.
- [Prachathai](https://prachatai.com/) is a left-leaning, human-rights-focused news site.

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[@nakhunchumpolsathien](https://github.com/nakhunchumpolsathien/)
[@caramelWaffle](https://github.com/caramelWaffle)

### Licensing Information

MIT License

### Citation Information

```
@mastersthesis{chumpolsathien_2020,
  title={Using Knowledge Distillation from Keyword Extraction to Improve the Informativeness of Neural Cross-lingual Summarization},
  author={Chumpolsathien, Nakhun},
  year={2020},
  school={Beijing Institute of Technology}
}
```
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"thaisum": {"description": "ThaiSum is a large-scale corpus for Thai text summarization obtained from several online news websites namely Thairath,\nThaiPBS, Prachathai, and The Standard. This dataset consists of over 350,000 article and summary pairs\nwritten by journalists.\n", "citation": "@mastersthesis{chumpolsathien_2020, \n title={Using Knowledge Distillation from Keyword Extraction to Improve the Informativeness of Neural Cross-lingual Summarization},\n author={Chumpolsathien, Nakhun}, \n year={2020}, \n school={Beijing Institute of Technology}\n", "homepage": "https://github.com/nakhunchumpolsathien/ThaiSum", "license": "", "features": {"title": {"dtype": "string", "id": null, "_type": "Value"}, "body": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "tags": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "body", "output": "summary"}, "builder_name": "thaisum", "config_name": "thaisum", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2945472406, "num_examples": 358868, "dataset_name": "thaisum"}, "validation": {"name": "validation", "num_bytes": 118437310, "num_examples": 11000, "dataset_name": "thaisum"}, "test": {"name": "test", "num_bytes": 119496704, "num_examples": 11000, "dataset_name": "thaisum"}}, "download_checksums": {"https://archive.org/download/thaisum_datasets/data.zip": {"num_bytes": 647582078, "checksum": "526610cc780ebe8c34c8bcd49d169861637fed426ba860fb1e9d48768599e1bf"}}, "download_size": 647582078, "post_processing_size": null, "dataset_size": 3183406420, "size_in_bytes": 3830988498}}
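Since this metadata is plain JSON, it can be inspected before triggering the ~648 MB download; a minimal sketch, run from a checkout of this repository:

```python
# Sketch: read the bundled metadata without downloading the data itself.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["thaisum"]

print(info["supervised_keys"])  # {'input': 'body', 'output': 'summary'}
print(info["download_size"])    # 647582078 bytes (~648 MB)
print({name: split["num_examples"] for name, split in info["splits"].items()})
```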
dummy/thaisum/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd431d270df1c5fca14e27ed8ed7fda7023970a92c62cfb2669899ecad5d5217
size 38463
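The three lines above are a Git LFS pointer, not the zip archive itself: `oid` is the SHA-256 of the real file and `size` is its length in bytes. A minimal sketch of reading such a pointer (the helper name is illustrative):

```python
# Sketch: parse a Git LFS pointer file (key/value lines) into a dict.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size_bytes": int(fields["size"]),
    }
```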
thaisum.py
ADDED
@@ -0,0 +1,104 @@
from __future__ import absolute_import, division, print_function

import csv
import os

import datasets


csv.field_size_limit(int(1e6))  # to accommodate large fields


_CITATION = """\
@mastersthesis{chumpolsathien_2020,
    title={Using Knowledge Distillation from Keyword Extraction to Improve the Informativeness of Neural Cross-lingual Summarization},
    author={Chumpolsathien, Nakhun},
    year={2020},
    school={Beijing Institute of Technology}
}
"""

_DESCRIPTION = """\
ThaiSum is a large-scale corpus for Thai text summarization obtained from several online news websites namely Thairath,
ThaiPBS, Prachathai, and The Standard. This dataset consists of over 350,000 article and summary pairs
written by journalists.
"""


class ThaiSumConfig(datasets.BuilderConfig):
    """BuilderConfig for ThaiSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for ThaiSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ThaiSumConfig, self).__init__(**kwargs)


class Thaisum(datasets.GeneratorBasedBuilder):
    """ThaiSum: The largest dataset for Thai text summarization"""

    _DOWNLOAD_URL = "https://archive.org/download/thaisum_datasets/data.zip"
    _TRAIN_FILE = "train.csv"
    _VAL_FILE = "valid.csv"
    _TEST_FILE = "test.csv"

    BUILDER_CONFIGS = [
        ThaiSumConfig(
            name="thaisum",
            version=datasets.Version("1.0.0"),
            description="ThaiSum: The largest dataset for Thai text summarization",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "body": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "tags": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=("body", "summary"),
            homepage="https://github.com/nakhunchumpolsathien/ThaiSum",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
        data_dir = os.path.join(arch_path, "data")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FILE)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, self._VAL_FILE)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FILE)},
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate examples."""
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # skip header
            for id_, row in enumerate(csv_reader):
                # CSV columns: title, body, summary, type, tags, url
                yield id_, {
                    "title": row[0],
                    "body": row[1],
                    "summary": row[2],
                    "type": row[3],
                    "tags": row[4],
                    "url": row[5],
                }
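To smoke-test this builder script locally, something like the following should work with the `datasets` library (a sketch; the local path is illustrative):

```python
# Sketch: run the loader script directly rather than via the Hub identifier.
from datasets import load_dataset

ds = load_dataset("./thaisum.py")  # or load_dataset("thaisum") from the Hub
print(ds)  # expect train/validation/test splits of 358868/11000/11000 examples
```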