# CUB200-2011 Dataset

CUB200-2011 is a fine-grained dataset released by Caltech in 2011 (extending the 2010 CUB-200 dataset) and is the standard benchmark image dataset for fine-grained classification and recognition research.
The dataset contains 11,788 bird images covering 200 bird subcategories, with 5,994 images in the training set and 5,794 in the test set. Every image is annotated with a class label, a bounding box around the bird, key part locations, and bird attribute information.
The downloaded dataset contains the following annotation files:

`bounding_boxes.txt`; `classes.txt`; `image_class_labels.txt`; `images.txt`; `train_test_split.txt`.
- `bounding_boxes.txt`: the bounding box of the bird in each image;
- `classes.txt`: the 200 class names;
- `image_class_labels.txt`: the class label of each image;
- `images.txt`: the ID and relative path of each image;
- `train_test_split.txt`: the train/test assignment of each image.
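All five files are plain text keyed by a shared image ID. For orientation, the first line of each file has roughly the following shape (the concrete values here are illustrative):

```text
images.txt              1 001.Black_footed_Albatross/Black_Footed_Albatross_0046_18.jpg
train_test_split.txt    1 0                              # <image_id> <is_training_image>
image_class_labels.txt  1 1                              # <image_id> <class_id>
classes.txt             1 001.Black_footed_Albatross     # <class_id> <class_name>
bounding_boxes.txt      1 60.0 27.0 325.0 304.0          # <image_id> <x> <y> <width> <height>
```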
This README splits the raw CUB200-2011 download into a training set and a test set according to `train_test_split.txt` and `images.txt`. Once the images are arranged this way, the dataset is easy to read with `ImageFolder` and `DataLoader` under the PyTorch deep learning framework; the directory layout `ImageFolder` expects is sketched below. The relevant Python code then follows.
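`ImageFolder` derives the label of each image from its parent folder, so the split script must create one sub-folder per class. After running it, the tree should look like this (class folder names come from `classes.txt`; the file name shown is illustrative):

```text
dataset/
├── train/
│   ├── 001.Black_footed_Albatross/
│   │   ├── Black_Footed_Albatross_0046_18.jpg
│   │   └── ...
│   ├── 002.Laysan_Albatross/
│   └── ...
└── test/
    ├── 001.Black_footed_Albatross/
    └── ...
```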
## (1) Splitting CUB200-2011 into training and test sets
```python
# *_*coding: utf-8 *_*
# author --liming--

"""
Read images.txt to get the relative path of every image.
Read train_test_split.txt to get the train/test flag of every image
(1 = training image, 0 = test image), then copy each image into
dataset/train/ or dataset/test/ accordingly.
"""

import os
import shutil
import time

import config

time_start = time.time()

# File paths
path_images = config.path + 'images.txt'
path_split = config.path + 'train_test_split.txt'
train_save_path = config.path + 'dataset/train/'
test_save_path = config.path + 'dataset/test/'

# Read images.txt: each line is "<image_id> <class_dir>/<file_name>"
images = []
with open(path_images, 'r') as f:
    for line in f:
        images.append(line.strip().split(' ')[1])

# Read train_test_split.txt: each line is "<image_id> <is_training_image>"
split = []
with open(path_split, 'r') as f:
    for line in f:
        split.append(int(line.strip().split(' ')[1]))

# Copy every image into its class folder under train/ or test/
num = len(images)  # total number of images
for k in range(num):
    class_dir, file_name = images[k].split('/')
    if split[k] == 1:  # training image
        save_path = train_save_path + class_dir
    else:              # test image
        save_path = test_save_path + class_dir
    os.makedirs(save_path, exist_ok=True)  # create the class folder on first use
    shutil.copy(config.path + 'images/' + images[k], save_path + '/' + file_name)
    print('%s done!' % file_name)

time_end = time.time()
print('CUB200 train/test split finished, took %.2fs!' % (time_end - time_start))
```
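As a quick sanity check (a minimal sketch, assuming the split script above has already been run), the copied file counts should match the 5,994 / 5,794 figures quoted earlier:

```python
import os
import config

for subset in ('train', 'test'):
    root = config.path + 'dataset/' + subset + '/'
    # Count image files across all 200 class folders.
    n = sum(len(files) for _, _, files in os.walk(root))
    print(subset, n)  # expected: train 5994, test 5794
```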
The `config` module used above (`config.py`):
```python
# *_*coding: utf-8 *_*
# author --liming--

# Root of the extracted CUB_200_2011 download; adjust to your machine.
path = '/media/lm/C3F680DFF08EB695/细粒度数据集/birds/CUB200/CUB_200_2011/'

# Output folders produced by the split script above.
ROOT_TRAIN = path + 'dataset/train/'
ROOT_TEST = path + 'dataset/test/'
BATCH_SIZE = 16
```
## (2) Reading the data with PyTorch
```python
# *_*coding: utf-8 *_*
# author --liming--

"""
Read the converted dataset so that PyTorch can consume it.
"""

import torch
import config
from torchvision import datasets, transforms

data_transform = transforms.Compose([
    transforms.Resize((224, 224)),  # fixed size so images can be stacked into batches
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])


def train_data_load():
    # Training set
    root_train = config.ROOT_TRAIN
    train_dataset = datasets.ImageFolder(root_train,
                                         transform=data_transform)
    CLASS = train_dataset.class_to_idx
    print('Training label-to-folder mapping:', CLASS)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=config.BATCH_SIZE,
                                               shuffle=True)
    return CLASS, train_loader


def test_data_load():
    # Test set
    root_test = config.ROOT_TEST
    test_dataset = datasets.ImageFolder(root_test,
                                        transform=data_transform)
    CLASS = test_dataset.class_to_idx
    print('Test label-to-folder mapping:', CLASS)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=config.BATCH_SIZE,
                                              shuffle=False)  # no need to shuffle the test set
    return CLASS, test_loader


if __name__ == '__main__':
    train_data_load()
    test_data_load()
```
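A minimal usage sketch (assuming the script above is saved as `data.py`, a hypothetical module name): pull one batch from the training loader and inspect the tensor shapes.

```python
from data import train_data_load  # hypothetical module name for the script above

CLASS, train_loader = train_data_load()
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([16, 3, 224, 224]) with BATCH_SIZE = 16
print(labels.shape)  # torch.Size([16])
print(len(CLASS))    # 200 classes
```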