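"""Dataset loading script for the SchoolNotebooks image dataset.

Defines a `datasets.GeneratorBasedBuilder` that downloads an image archive and
per-split annotation files, then yields one `image` feature per example.
"""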
import os
import json
import datasets


class SchoolNotebooks(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Each example exposes a single Image feature; the annotation files
        # are only used to decide which images belong to each split.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                }
            )
        )
    def _split_generators(self, dl_manager):
        # Relative paths to the image archive and the per-split annotation files.
        _URLS = {
            "images": "images.zip",
            "train_data": "annotations_train.json",
            "test_data": "annotations_test.json",
            "val_data": "annotations_val.json",
        }
        data_files = dl_manager.download_and_extract(_URLS)
        # Every split iterates over the same extracted image folder; the
        # annotation file determines which images are kept.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_paths": dl_manager.iter_files(data_files["images"]),
                    "annotation_path": data_files["train_data"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "image_paths": dl_manager.iter_files(data_files["images"]),
                    "annotation_path": data_files["test_data"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "image_paths": dl_manager.iter_files(data_files["images"]),
                    "annotation_path": data_files["val_data"],
                },
            ),
        ]
    def _generate_examples(self, image_paths, annotation_path):
        """Yield examples for every image referenced by the split's annotation file."""
        with open(annotation_path, "r") as f:
            data = json.load(f)
        # Collect the file names that belong to this split.
        image_names = set()
        for image_data in data["images"]:
            image_names.add(image_data["file_name"])
        # Keep only extracted images whose file name appears in the annotations.
        for idx, image_path in enumerate(image_paths):
            if os.path.basename(image_path) in image_names:
                example = {
                    "image": image_path,
                }
                yield idx, example