holylovenia committed
Commit f85f4ff
1 Parent(s): 7d059a4

Upload vintext.py with huggingface_hub

Files changed (1)
  1. vintext.py +235 -0
vintext.py ADDED
@@ -0,0 +1,235 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Vintext is a challenging scene text dataset for Vietnamese, where some characters are visually ambiguous due to accent symbols.
+ This dataset contains 1,500 fully annotated images from the original release. Each text instance is delineated by a quadrilateral bounding box and associated with the ground-truth sequence of characters.
+ The dataset is randomly split into two subsets for training (1,200 images) and testing (300 images).
+ """
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @INPROCEEDINGS{vintext,
+   author={Nguyen, Nguyen and Nguyen, Thu and Tran, Vinh and Tran, Minh-Triet and Ngo, Thanh Duc and Huu Nguyen, Thien and Hoai, Minh},
+   booktitle={2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+   title={Dictionary-guided Scene Text Recognition},
+   year={2021},
+   pages={7379-7388},
+   keywords={Training;Visualization;Computer vision;Casting;Dictionaries;Codes;Text recognition},
+   doi={10.1109/CVPR46437.2021.00730}
+ }
+ """
+
+ _DATASETNAME = "vintext"
+
+ _DESCRIPTION = """\
+ Vintext is a challenging scene text dataset for Vietnamese, where some characters are visually ambiguous due to accent symbols.
+ The full dataset contains 2,000 fully annotated images with 56,084 text instances. Each text instance is delineated by a quadrilateral bounding box and associated with the ground-truth sequence of characters.
+ The dataset is randomly split into three subsets for training (1,200 images), validation (300 images), and testing (500 images).
+ """
+
+ _HOMEPAGE = "https://github.com/VinAIResearch/dict-guided"
+
+ _LANGUAGES = ["vie"]
+
+ _LICENSE = Licenses.AGPL_3_0.value
+
+ _LOCAL = False
+
+ _GDRIVE_ID = "1UUQhNvzgpZy7zXBFQp0Qox-BBjunZ0ml"
+
+ _SUPPORTED_TASKS = [Tasks.OPTICAL_CHARACTER_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class VintextDataset(datasets.GeneratorBasedBuilder):
+     """
+     Vintext is a challenging scene text dataset for Vietnamese, where some characters are visually ambiguous due to accent symbols.
+     This loader covers 1,500 fully annotated images from the original release. Each text instance is delineated by a quadrilateral bounding box and associated with the ground-truth sequence of characters.
+     The data is randomly split into two subsets for training (1,200 images) and testing (300 images).
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_imtext",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_imtext",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "image_path": datasets.Value("string"),
+                     "annotations": datasets.Sequence(
+                         {
+                             "x1": datasets.Value("int32"),
+                             "y1": datasets.Value("int32"),
+                             "x2": datasets.Value("int32"),
+                             "y2": datasets.Value("int32"),
+                             "x3": datasets.Value("int32"),
+                             "y3": datasets.Value("int32"),
+                             "x4": datasets.Value("int32"),
+                             "y4": datasets.Value("int32"),
+                             "transcript": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_imtext":
+             features = schemas.image_text_features()
+             features["metadata"]["annotations"] = datasets.Sequence(
+                 {
+                     "x1": datasets.Value("int32"),
+                     "y1": datasets.Value("int32"),
+                     "x2": datasets.Value("int32"),
+                     "y2": datasets.Value("int32"),
+                     "x3": datasets.Value("int32"),
+                     "y3": datasets.Value("int32"),
+                     "x4": datasets.Value("int32"),
+                     "y4": datasets.Value("int32"),
+                     "transcript": datasets.Value("string"),
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
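+     # For reference, a source example yielded by this builder looks roughly like
+     # (values hypothetical): {"id": 1, "image_path": ".../im0001.jpg",
+     # "annotations": [{"x1": 55, "y1": 12, ..., "transcript": "phở"}]}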
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         try:
+             import gdown
+         except ImportError as err:
+             raise ImportError("You need to install gdown (`pip install gdown`) to download a public file/folder from Google Drive.") from err
+
+         zip_filepath = os.path.join(os.path.dirname(__file__), "vietnamese_original.zip")
+         if not os.path.exists(zip_filepath):
+             gdown.download(id=_GDRIVE_ID, output=zip_filepath)
+
+         data_dir = dl_manager.extract(zip_filepath)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "imagepath": Path(data_dir) / "vietnamese/train_images",
+                     "labelpath": Path(data_dir) / "vietnamese/labels",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "imagepath": Path(data_dir) / "vietnamese/test_image",
+                     "labelpath": Path(data_dir) / "vietnamese/labels",
+                 },
+             ),
+         ]
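+     # The archive sits on Google Drive rather than behind a plain URL, so it is fetched once
+     # with gdown and cached next to this script. Equivalent one-off manual step (a sketch,
+     # using the same call as above):
+     #   python -c "import gdown; gdown.download(id='1UUQhNvzgpZy7zXBFQp0Qox-BBjunZ0ml', output='vietnamese_original.zip')"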
+
+     def _generate_examples(self, imagepath: Path, labelpath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         df_list = []
+
+         # Sort for a deterministic iteration order across filesystems.
+         for image in sorted(os.listdir(imagepath)):
+             # Derive the numeric id by dropping the two-character prefix and the file extension.
+             image_id = int(image.split(".")[0][2:])
+             label_file = os.path.join(labelpath, f"gt_{image_id}.txt")
+             # Labels are Vietnamese text; be explicit about the encoding.
+             with open(label_file, "r", encoding="utf-8") as f:
+                 label = f.read().strip()
+             df_list.append({"id": image_id, "image_path": os.path.join(imagepath, image), "label": label})
+
+         if self.config.schema == "source":
+             for i, row in enumerate(df_list):
+                 # Split each annotation line into at most 9 fields so transcripts that
+                 # themselves contain commas are not truncated.
+                 labels = [label.split(",", 8) for label in row["label"].split("\n")]
+
+                 yield i, {
+                     "id": row["id"],
+                     "image_path": row["image_path"],
+                     "annotations": [
+                         {
+                             # Cast coordinates to int to match the declared int32 features.
+                             "x1": int(label[0]),
+                             "y1": int(label[1]),
+                             "x2": int(label[2]),
+                             "y2": int(label[3]),
+                             "x3": int(label[4]),
+                             "y3": int(label[5]),
+                             "x4": int(label[6]),
+                             "y4": int(label[7]),
+                             "transcript": label[8],
+                         }
+                         for label in labels
+                     ],
+                 }
+
+         elif self.config.schema == "seacrowd_imtext":
+             for i, row in enumerate(df_list):
+                 labels = [label.split(",", 8) for label in row["label"].split("\n")]
+
+                 yield i, {
+                     "id": row["id"],
+                     "image_paths": [row["image_path"]],
+                     "texts": None,
+                     "metadata": {
+                         "context": None,
+                         "labels": None,
+                         "annotations": [
+                             {
+                                 "x1": int(label[0]),
+                                 "y1": int(label[1]),
+                                 "x2": int(label[2]),
+                                 "y2": int(label[3]),
+                                 "x3": int(label[4]),
+                                 "y3": int(label[5]),
+                                 "x4": int(label[6]),
+                                 "y4": int(label[7]),
+                                 "transcript": label[8],
+                             }
+                             for label in labels
+                         ],
+                     },
+                 }
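
For reference, a minimal sketch of using the script once it is saved locally (the filename and config name follow the definitions above; `trust_remote_code=True` is required by recent versions of `datasets` for script-based loaders):

import datasets

# Builds the source-schema view; the first call downloads and extracts the Google Drive archive.
ds = datasets.load_dataset("vintext.py", name="vintext_source", trust_remote_code=True)
print(ds["train"][0]["annotations"])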