mrhacker7599
committed on
Commit
•
ed1b081
1
Parent(s):
5aa0593
Upload 10 files
Browse files- README.dataset.txt +17 -0
- README.md +81 -3
- README.roboflow.txt +15 -0
- data/test.zip +3 -0
- data/train.zip +3 -0
- data/valid-mini.zip +3 -0
- data/valid.zip +3 -0
- license-plate-object-detection.py +152 -0
- split_name_to_num_samples.json +1 -0
- thumbnail.jpg +3 -0
README.dataset.txt
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# undefined > original-images
|
2 |
+
https://public.roboflow.ai/object-detection/undefined
|
3 |
+
|
4 |
+
Provided by undefined
|
5 |
+
License: CC BY 4.0
|
6 |
+
|
7 |
+
# Overview
|
8 |
+
The Numberplate Dataset is a collection of Licence Plates that can easily be used for Automatic Number Plate Detection.
|
9 |
+
# Example Footage!
|
10 |
+
![License Plate Detection](https://media0.giphy.com/media/5l6FAJoHuT0YiM3oZG/giphy.gif?cid=790b7611013999dd479d98f81ddbaaf2a0563c41cea2413b&rid=giphy.gif&ct=g)
|
11 |
+
|
12 |
+
# Training and Deployment
|
13 |
+
The Number Plate model has been trained in Roboflow, and is available for inference on the Dataset tab.
|
14 |
+
One could also build an Automatic Number Plate Recognition [ANPR] app using YOLOR and EasyOCR. This is achieved using the Roboflow Platform, with which you can deploy the model for robust and real-time ANPR.
|
15 |
+
|
16 |
+
# About Augmented Startups
|
17 |
+
We are at the forefront of Artificial Intelligence in computer vision. With over 92k subscribers on YouTube, we embark on fun and innovative projects in this field and create videos and courses so that everyone can be an expert in this field. Our vision is to create a world full of inventors that can turn their dreams into reality.
|
README.md
CHANGED
@@ -1,3 +1,81 @@
|
|
1 |
-
---
|
2 |
-
|
3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
task_categories:
|
3 |
+
- object-detection
|
4 |
+
tags:
|
5 |
+
- roboflow
|
6 |
+
- roboflow2huggingface
|
7 |
+
- Self Driving
|
8 |
+
- Anpr
|
9 |
+
---
|
10 |
+
|
11 |
+
<div align="center">
|
12 |
+
<img width="640" alt="keremberke/license-plate-object-detection" src="https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/thumbnail.jpg">
|
13 |
+
</div>
|
14 |
+
|
15 |
+
### Dataset Labels
|
16 |
+
|
17 |
+
```
|
18 |
+
['license_plate']
|
19 |
+
```
|
20 |
+
|
21 |
+
|
22 |
+
### Number of Images
|
23 |
+
|
24 |
+
```json
|
25 |
+
{'train': 6176, 'valid': 1765, 'test': 882}
|
26 |
+
```
|
27 |
+
|
28 |
+
|
29 |
+
### How to Use
|
30 |
+
|
31 |
+
- Install [datasets](https://pypi.org/project/datasets/):
|
32 |
+
|
33 |
+
```bash
|
34 |
+
pip install datasets
|
35 |
+
```
|
36 |
+
|
37 |
+
- Load the dataset:
|
38 |
+
|
39 |
+
```python
|
40 |
+
from datasets import load_dataset
|
41 |
+
|
42 |
+
ds = load_dataset("keremberke/license-plate-object-detection", name="full")
|
43 |
+
example = ds['train'][0]
|
44 |
+
```
|
45 |
+
|
46 |
+
### Roboflow Dataset Page
|
47 |
+
[https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk/dataset/1](https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk/dataset/1?ref=roboflow2huggingface)
|
48 |
+
|
49 |
+
### Citation
|
50 |
+
|
51 |
+
```
|
52 |
+
@misc{ vehicle-registration-plates-trudk_dataset,
|
53 |
+
title = { Vehicle Registration Plates Dataset },
|
54 |
+
type = { Open Source Dataset },
|
55 |
+
author = { Augmented Startups },
|
56 |
+
howpublished = { \\url{ https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk } },
|
57 |
+
url = { https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk },
|
58 |
+
journal = { Roboflow Universe },
|
59 |
+
publisher = { Roboflow },
|
60 |
+
year = { 2022 },
|
61 |
+
month = { jun },
|
62 |
+
note = { visited on 2023-01-18 },
|
63 |
+
}
|
64 |
+
```
|
65 |
+
|
66 |
+
### License
|
67 |
+
CC BY 4.0
|
68 |
+
|
69 |
+
### Dataset Summary
|
70 |
+
This dataset was exported via roboflow.ai on January 13, 2022 at 5:20 PM GMT
|
71 |
+
|
72 |
+
It includes 8823 images.
|
73 |
+
VRP are annotated in COCO format.
|
74 |
+
|
75 |
+
The following pre-processing was applied to each image:
|
76 |
+
* Auto-orientation of pixel data (with EXIF-orientation stripping)
|
77 |
+
|
78 |
+
No image augmentation techniques were applied.
|
79 |
+
|
80 |
+
|
81 |
+
|
README.roboflow.txt
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
Vehicle Registration Plates - v1 original-images
|
3 |
+
==============================
|
4 |
+
|
5 |
+
This dataset was exported via roboflow.ai on January 13, 2022 at 5:20 PM GMT
|
6 |
+
|
7 |
+
It includes 8823 images.
|
8 |
+
VRP are annotated in COCO format.
|
9 |
+
|
10 |
+
The following pre-processing was applied to each image:
|
11 |
+
* Auto-orientation of pixel data (with EXIF-orientation stripping)
|
12 |
+
|
13 |
+
No image augmentation techniques were applied.
|
14 |
+
|
15 |
+
|
data/test.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f23ce4d52022943dd0bcd35ca79939ccb39ac64ca9e0768c2bb0a3994d62c94f
|
3 |
+
size 133
|
data/train.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:78afd5f26568569d7c620f9d79d11b3d740e611c7b17adc835a64fa3c10e3c6f
|
3 |
+
size 134
|
data/valid-mini.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:33eaac1b3091ed00126faced3a1868f9c5eac9a5534dd01778fa7cf133913d83
|
3 |
+
size 130
|
data/valid.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7e809cd5d761b77f024062505ef79109cd5e4b73beae1382c5858571b7c52e07
|
3 |
+
size 133
|
license-plate-object-detection.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import collections
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
|
7 |
+
|
8 |
+
_HOMEPAGE = "https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk/dataset/1?ref=roboflow2huggingface"
|
9 |
+
_LICENSE = "CC BY 4.0"
|
10 |
+
_CITATION = """\
|
11 |
+
@misc{ vehicle-registration-plates-trudk_dataset,
|
12 |
+
title = { Vehicle Registration Plates Dataset },
|
13 |
+
type = { Open Source Dataset },
|
14 |
+
author = { Augmented Startups },
|
15 |
+
howpublished = { \\url{ https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk } },
|
16 |
+
url = { https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk },
|
17 |
+
journal = { Roboflow Universe },
|
18 |
+
publisher = { Roboflow },
|
19 |
+
year = { 2022 },
|
20 |
+
month = { jun },
|
21 |
+
note = { visited on 2023-01-18 },
|
22 |
+
}
|
23 |
+
"""
|
24 |
+
_CATEGORIES = ['license_plate']
|
25 |
+
_ANNOTATION_FILENAME = "_annotations.coco.json"
|
26 |
+
|
27 |
+
|
28 |
+
class LICENSEPLATEOBJECTDETECTIONConfig(datasets.BuilderConfig):
    """Builder Config for license-plate-object-detection"""

    def __init__(self, data_urls, **kwargs):
        """Create a config for the license-plate-object-detection dataset.

        Args:
            data_urls: `dict`, name to url to download the zip file from.
            **kwargs: keyword arguments forwarded to super.
        """
        # All configs share the same dataset version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
|
41 |
+
|
42 |
+
|
43 |
+
class LICENSEPLATEOBJECTDETECTION(datasets.GeneratorBasedBuilder):
    """license-plate-object-detection object detection dataset.

    A `datasets.GeneratorBasedBuilder` that downloads zipped COCO-format
    splits and yields one example per annotated image.  Two configs are
    provided: "full" (real train/valid/test splits) and "mini" (a small
    archive reused for all three splits, useful for smoke testing).
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        LICENSEPLATEOBJECTDETECTIONConfig(
            name="full",
            description="Full version of license-plate-object-detection dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/test.zip",
            },
        ),
        LICENSEPLATEOBJECTDETECTIONConfig(
            name="mini",
            description="Mini version of license-plate-object-detection dataset.",
            # All three splits deliberately point at the same small archive.
            data_urls={
                "train": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/data/valid-mini.zip",
            },
        )
    ]

    def _info(self):
        """Return the `DatasetInfo` describing the example schema."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                # One entry per COCO annotation attached to the image.
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        # COCO bbox: [x, y, width, height].
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract each split archive and map it to a SplitGenerator."""
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "folder_dir": data_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "folder_dir": data_files["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "folder_dir": data_files["test"],
                },
            ),
        ]

    def _generate_examples(self, folder_dir):
        """Yield `(idx, example)` pairs from one extracted split folder.

        Loads the COCO annotation file found in *folder_dir*, groups the
        annotations by image id, then emits one example for every file in
        the folder that appears in the annotation index (non-image files,
        including the annotation JSON itself, are skipped).
        """

        def process_annot(annot, category_id_to_category):
            # Keep only the annotation fields exposed by the `objects` feature.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        idx = 0

        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        # Fix: declare the encoding explicitly rather than relying on the
        # platform default — JSON is UTF-8 by specification.
        with open(annotation_filepath, "r", encoding="utf-8") as f:
            annotations = json.load(f)
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        # Group annotations by the image they belong to for O(1) lookup below.
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}
        # (An unused `image_id_to_image` dict built here in the original has
        # been removed.)

        for filename in os.listdir(folder_dir):
            filepath = os.path.join(folder_dir, filename)
            if filename in filename_to_image:
                image = filename_to_image[filename]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                with open(filepath, "rb") as f:
                    image_bytes = f.read()
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": filepath, "bytes": image_bytes},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1
|
split_name_to_num_samples.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"train": 6176, "valid": 1765, "test": 882}
|
thumbnail.jpg
ADDED
Git LFS Details
|