import json

import datasets

logger = datasets.logging.get_logger(__name__)


class Dataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Declared features mirror the keys yielded by _generate_examples.
        return datasets.DatasetInfo(
            features=datasets.Features({
                "images": datasets.Sequence(datasets.Image()),
                "conversations": datasets.Sequence(datasets.Features({
                    "from": datasets.Value("string"),
                    "value": datasets.Value("string")
                })),
                "length": datasets.Value("int32"),  # assumed integer count carried in the jsonl
                "task_name": datasets.Value("string"),
                "step_name": datasets.Value("string"),
                "has_retry": datasets.Value("bool"),
                "retry_index": datasets.Value("int32"),
                "total_retries": datasets.Value("int32")
            })
        )
    
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        dl_manager.download_config.token = True
        dl_manager.download_config.num_proc = 10

        base_url = "https://huggingface.co/datasets/liuylhf/ocr/resolve/main/data"
        train_image_dir = dl_manager.download_and_extract(
            f"{base_url}/train/train.tar.gz"
        )
        test_image_dir = dl_manager.download_and_extract(
            f"{base_url}/test/test.tar.gz"
        )
        # download_and_extract returns one extraction path per archive, so
        # gather the two paths in a list rather than concatenating the strings.
        image_dirs = [train_image_dir, test_image_dir]

        # Map the relative path recorded in the jsonl ("images/<folder>/<file>")
        # to the absolute path of the extracted file.
        image_file_to_full_path_mapping = {
            'images/' + '/'.join(image_file.split('/')[-2:]): image_file
            for image_file in dl_manager.iter_files(image_dirs)
        }
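
        # Example (hypothetical file name): an extracted path ending in
        # "train/login-3_1/shot.png" is keyed as "images/login-3_1/shot.png",
        # matching the relative image paths stored in the jsonl records.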

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        f"{base_url}/train.jsonl"),
                    "image_file_to_full_path_mapping": image_file_to_full_path_mapping
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        f"{base_url}/test.jsonl"),
                    "image_file_to_full_path_mapping": image_file_to_full_path_mapping
                },
            ),
        ]

    def _get_step_info(self, item):
        # The parent folder of each image is named "<task>-<step>_<retry>",
        # so task name, step number, and retry index can all be recovered
        # from the first image path.
        first_image_path = item['images'][0]
        folder = first_image_path.split('/')[-2]

        task = folder.split('-')[0]
        step_number, retry_suffix = folder.split('-')[1].split('_')

        return {
            "task_name": task,
            "step_name": f"{task}-{step_number}",
            "retry_index": int(retry_suffix)
        }
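
    # _get_step_info example (hypothetical folder name): an image stored
    # under "login-3_1/" parses to
    # {"task_name": "login", "step_name": "login-3", "retry_index": 1}.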

    def _generate_examples(self, filepath, image_file_to_full_path_mapping):
        with open(filepath, "r") as f:
            lines = f.readlines()

        # First pass: skip records whose second conversation turn parses to
        # an empty actions list, and count how many attempts were recorded
        # for each step.
        items = []
        step_name_to_retry_count = {}
        for line in lines:
            item = json.loads(line)
            if len(json.loads(item["conversations"][1]["value"])["actions"]) == 0:
                continue

            items.append(item)
            step_name = self._get_step_info(item)["step_name"]
            step_name_to_retry_count[step_name] = (
                step_name_to_retry_count.get(step_name, 0) + 1
            )

        # Second pass: emit examples, resolving image paths through the
        # mapping built in _split_generators and attaching retry metadata.
        for idx, item in enumerate(items):
            step_info = self._get_step_info(item)
            yield idx, {
                "images": [
                    image_file_to_full_path_mapping[image] for image in item["images"]
                ],
                "conversations": item["conversations"],
                "length": item["length"],
                "task_name": step_info["task_name"],
                "step_name": step_info["step_name"],
                "has_retry": step_name_to_retry_count[step_info["step_name"]] > 1,
                "retry_index": step_info["retry_index"],
                "total_retries": step_name_to_retry_count[step_info["step_name"]]
            }
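

if __name__ == "__main__":
    # Usage sketch (hypothetical invocation, not used by the datasets
    # machinery): load the dataset through the hub repository implied by
    # base_url above. Recent datasets releases require trust_remote_code=True
    # for script-based datasets.
    from datasets import load_dataset

    ds = load_dataset("liuylhf/ocr", trust_remote_code=True)
    print(ds["train"][0]["step_name"], ds["train"][0]["retry_index"])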