Overview
Cityscapes:
A training dataset for autonomous driving, covering 19 urban street-scene classes: road, sidewalk, building, wall, fence, pole, traffic light, traffic sign, vegetation, terrain, sky, person, rider, car, truck, bus, train, motorcycle, and bicycle. It provides 3,475 finely annotated images for training and validation, along with roughly 20,000 coarsely annotated images.
Link: https://pan.baidu.com/s/1dxsVOOZ1RC7c-obM23fHIg
Extraction code: kmrl
Official download: https://www.cityscapes-dataset.com/
After downloading, place the gtFine and leftImg8bit folders into the data directory of the source code; the resulting layout under data is shown below:
E:.
├── cityscapes
│   ├── gtFine
│   │   ├── test
│   │   ├── train
│   │   └── val
│   └── leftImg8bit
│       ├── test
│       ├── train
│       └── val
└── list
    ├── cityscapes
    │   ├── test.lst
    │   ├── trainval.lst
    │   └── val.lst
    └── lip
        ├── testvalList.txt
        ├── trainList.txt
        └── valList.txt
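Each line of a .lst file pairs an image path with its label path, separated by a single space; the loader below splits lines on whitespace. As a hedged illustration of how such a list could be generated, here is a minimal sketch that assumes the standard Cityscapes file naming (the data_root and out_file paths and the train.lst name are assumptions, not part of the original code):

import glob
import os

data_root = 'data/cityscapes'                 # assumed dataset location
out_file = 'data/list/cityscapes/train.lst'   # hypothetical output path

# every training image, e.g. leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png
pattern = os.path.join(data_root, 'leftImg8bit', 'train', '*', '*_leftImg8bit.png')
with open(out_file, 'w') as f:
    for img_path in sorted(glob.glob(pattern)):
        # derive the matching fine-annotation label path from the image path
        label_path = img_path.replace('leftImg8bit', 'gtFine') \
                             .replace('_gtFine.png', '_gtFine_labelIds.png')
        # store paths relative to data/cityscapes, with forward slashes
        rel_img = os.path.relpath(img_path, data_root).replace(os.sep, '/')
        rel_label = os.path.relpath(label_path, data_root).replace(os.sep, '/')
        f.write(rel_img + ' ' + rel_label + '\n')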
Dataset.py
import os
import cv2
import numpy as np
from PIL import Image
import random
import torch
from torch.nn import functional as F
from torch.utils import data
import torchvision
class BaseDataset(data.Dataset):
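    # base class: shared normalization, augmentation, and sliding-window inference helpers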
def __init__(self,
ignore_label=-1,
crop_size=(512, 1024),
scale_factor=16,
downsample_rate=1,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
self.crop_size = crop_size
self.ignore_label = ignore_label
self.mean = mean
self.std = std
        # controls the random pre-crop scaling: scale = 0.5 + random.randint(0, scale_factor) / 10.0
        self.scale_factor = scale_factor
        # downsampling factor for the label along the x and y axes
        self.downsample_rate = downsample_rate
self.files = []
def __len__(self):
return len(self.files)
    # normalizing/standardizing the input speeds up model convergence
def input_transform(self, image):
        # convert to float32 and reorder channels from BGR to RGB
        image = image.astype(np.float32)[:, :, ::-1]
        # normalize: map pixel values into [0, 1]
        image = image / 255.0
        # standardize: subtract the mean and divide by the std
        image -= self.mean
        image /= self.std
return image
def label_transform(self, label):
return np.array(label).astype('int32')
def pad_image(self, image, h, w, size, padvalue):
pad_image = image.copy()
pad_h = max(size[0] - h, 0)
pad_w = max(size[1] - w, 0)
if pad_h > 0 or pad_w > 0:
            # pad with the constant padvalue; the arguments are (top, bottom, left, right)
pad_image = cv2.copyMakeBorder(image, 0, pad_h, 0,pad_w,
cv2.BORDER_CONSTANT,
value=padvalue)
return pad_image
def rand_crop(self, image, label):
h, w = image.shape[:2]
        # constant-value padding; a no-op whenever the input is already larger than crop_size
image = self.pad_image(image, h, w, self.crop_size,
(0.0, 0.0, 0.0))
label = self.pad_image(label, h, w, self.crop_size,
(self.ignore_label,))
new_h, new_w = label.shape
x = random.randint(0, new_w - self.crop_size[1])
y = random.randint(0, new_h - self.crop_size[0])
image = image[y:y + self.crop_size[0], x:x + self.crop_size[1]]
label = label[y:y + self.crop_size[0], x:x + self.crop_size[1]]
return image, label
def multi_scale_aug(self, image, label=None,
rand_scale=1, rand_crop=True):
# cv2.imshow('original', image)
        # compute the output size (h, w) of an aspect-ratio-preserving resize
h, w = image.shape[:2]
        if h > w:
            long_size = int(h * rand_scale + 0.5)
            new_h = long_size
            new_w = int(w * long_size / h + 0.5)
        else:
            long_size = int(w * rand_scale + 0.5)
            new_w = long_size
            new_h = int(h * long_size / w + 0.5)
        # bilinear interpolation (the cv2.resize default)
image = cv2.resize(image,dsize=(new_w, new_h),
interpolation=cv2.INTER_LINEAR)
# cv2.imshow('resize', image)
# cv2.waitKey(0)
if label is not None:
            # nearest-neighbor interpolation keeps label IDs discrete
label = cv2.resize(label, (new_w, new_h),
interpolation=cv2.INTER_NEAREST)
else:
return image
if rand_crop:
image, label = self.rand_crop(image, label)
return image, label
def gen_sample(self, image, label,
multi_scale=True, is_flip=True):
        # scale first, then crop
if multi_scale:
rand_scale = 0.5 + random.randint(0, self.scale_factor) / 10.0
image, label = self.multi_scale_aug(image, label,
rand_scale=rand_scale)
        # format conversion / normalization and standardization
image = self.input_transform(image)
label = self.label_transform(label)
        # cv2 stores images as H×W×C; convert to C×H×W for PyTorch
image = image.transpose((2, 0, 1))
        # classic augmentation: random horizontal flip
        if is_flip:
            # flip = -1 mirrors the image horizontally; flip = 1 leaves it unchanged
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        # optionally downsample the label by the given ratio
if self.downsample_rate != 1:
label = cv2.resize(label,
None,
fx=self.downsample_rate,
fy=self.downsample_rate,
interpolation=cv2.INTER_NEAREST)
return image, label
def inference(self, model, image, flip=False):
size = image.size()
pred = model(image)
        pred = F.interpolate(input=pred,
                             size=(size[-2], size[-1]),
                             mode='bilinear')
if flip:
flip_img = image.numpy()[:, :, :, ::-1]
flip_output = model(torch.from_numpy(flip_img.copy()))
            flip_output = F.interpolate(input=flip_output,
                                        size=(size[-2], size[-1]),
                                        mode='bilinear')
flip_pred = flip_output.cpu().numpy().copy()
flip_pred = torch.from_numpy(flip_pred[:, :, :, ::-1].copy()).cuda()
pred += flip_pred
pred = pred * 0.5
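        # the model is assumed to output log-probabilities, so exp() turns them back into probabilities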
return pred.exp()
def multi_scale_inference(self, model, image, scales=[1], flip=False):
batch, _, ori_height, ori_width = image.size()
assert batch == 1, "only supporting batchsize 1."
image = image.numpy()[0].transpose((1, 2, 0)).copy()
        # sliding-window stride: two thirds of the crop size
        stride_h = int(self.crop_size[0] * 2.0 / 3.0)
        stride_w = int(self.crop_size[1] * 2.0 / 3.0)
final_pred = torch.zeros([1, self.num_classes,
ori_height, ori_width]).cuda()
        # padding value corresponding to a black pixel after standardization
        padvalue = -1.0 * np.array(self.mean) / np.array(self.std)
for scale in scales:
new_img = self.multi_scale_aug(image=image,
rand_scale=scale,
rand_crop=False)
height, width = new_img.shape[:-1]
if max(height, width) <= np.min(self.crop_size):
new_img = self.pad_image(new_img, height, width,
self.crop_size, padvalue)
new_img = new_img.transpose((2, 0, 1))
new_img = np.expand_dims(new_img, axis=0)
new_img = torch.from_numpy(new_img)
preds = self.inference(model, new_img, flip)
preds = preds[:, :, 0:height, 0:width]
else:
if height < self.crop_size[0] or width < self.crop_size[1]:
new_img = self.pad_image(new_img, height, width,
self.crop_size, padvalue)
new_h, new_w = new_img.shape[:-1]
                rows = int(np.ceil(1.0 * (new_h -
                                          self.crop_size[0]) / stride_h)) + 1
                cols = int(np.ceil(1.0 * (new_w -
                                          self.crop_size[1]) / stride_w)) + 1
preds = torch.zeros([1, self.num_classes,
new_h, new_w]).cuda()
count = torch.zeros([1, 1, new_h, new_w]).cuda()
for r in range(rows):
for c in range(cols):
h0 = r * stride_h
w0 = c * stride_w
h1 = min(h0 + self.crop_size[0], new_h)
w1 = min(w0 + self.crop_size[1], new_w)
crop_img = new_img[h0:h1, w0:w1, :]
if h1 == new_h or w1 == new_w:
crop_img = self.pad_image(crop_img,
h1 - h0,
w1 - w0,
self.crop_size,
padvalue)
crop_img = crop_img.transpose((2, 0, 1))
crop_img = np.expand_dims(crop_img, axis=0)
crop_img = torch.from_numpy(crop_img)
pred = self.inference(model, crop_img, flip)
preds[:, :, h0:h1, w0:w1] += pred[:, :, 0:h1 - h0, 0:w1 - w0]
count[:, :, h0:h1, w0:w1] += 1
preds = preds / count
preds = preds[:, :, :height, :width]
            preds = F.interpolate(preds, (ori_height, ori_width),
                                  mode='bilinear')
final_pred += preds
return final_pred
class Cityscapes(BaseDataset):
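    # Cityscapes-specific logic: file-list parsing, label ID remapping, and colorized prediction saving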
def __init__(self,
root,
list_path,
                 num_samples=None,
                 # number of samples to use (entries read from the .lst file)
num_classes= 19,
multi_scale= True,
flip= True,
ignore_label= -1,
crop_size= (512, 1024),
downsample_rate= 1,
scale_factor= 16,
mean= [0.485, 0.456, 0.406],
std= [0.229, 0.224, 0.225]):
super(Cityscapes, self).__init__(ignore_label,
crop_size, scale_factor, downsample_rate,mean, std, )
self.root = root
self.list_path = list_path
self.num_classes = num_classes
self.multi_scale = multi_scale
self.flip = flip
        # read the .txt/.lst file and split each line on whitespace (make sure there are no blank lines)
        self.img_list = [line.strip().split() for line in open(root + list_path)]
        # build one sample dict per line: {image_path, label_path, label_name}
        self.files = self.read_files()
        # files = [{sample1}, {sample2}, {sample3}, ...]; num_samples limits how many are kept
if num_samples:
self.files = self.files[:num_samples]
        # map the 34 raw Cityscapes label IDs onto the 19 training IDs; everything else is ignored
self.label_mapping = {-1: ignore_label, 0: ignore_label,
1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label,
5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label,
10: ignore_label, 11: 2, 12: 3,
13: 4, 14: ignore_label, 15: ignore_label,
16: ignore_label, 17: 5, 18: ignore_label,
19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11,
25: 12, 26: 13, 27: 14, 28: 15,
29: ignore_label, 30: ignore_label,
31: 16, 32: 17, 33: 18}
        # per-class loss weights to counter class imbalance
self.class_weights = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345,
1.0166, 0.9969, 0.9754, 1.0489,
0.8786, 1.0023, 0.9539, 0.9843,
1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507]).cuda()
def __getitem__(self, index):
item = self.files[index]
name = item["name"]
        # read the image in BGR order
image = cv2.imread(os.path.join(self.root, 'cityscapes', item["img"]),
cv2.IMREAD_COLOR)
        # cv2.imshow('image', image)
        # # cv2.waitKey(0) keeps the window open instead of flashing by or hanging unresponsive
        # cv2.waitKey(0)
size = image.shape
if 'test' in self.list_path:
image = self.input_transform(image)
image = image.transpose((2, 0, 1))
return image.copy()
        # read the label as a grayscale image
label = cv2.imread(os.path.join(self.root, 'cityscapes', item["label"]),
cv2.IMREAD_GRAYSCALE)
        # cv2.imshow('label', label)
        # # cv2.waitKey(0) keeps the window open instead of flashing by or hanging unresponsive
        # cv2.waitKey(0)
        # remap the raw label IDs to training IDs
        label = self.convert_label(label)
        # cv2.imwrite(r'C:\Users\Alice\Desktop\1.png', label)
image, label = self.gen_sample(image, label,
self.multi_scale, self.flip)
return image.copy(), label.copy(), np.array(size), name
def read_files(self):
files = []
if 'test' in self.list_path:
for item in self.img_list:
image_path = item
name = os.path.splitext(os.path.basename(image_path[0]))[0]
files.append({
"img": image_path[0],
"name": name,
})
else:
for item in self.img_list:
image_path, label_path = item
name = os.path.splitext(os.path.basename(label_path))[0]
files.append({
"img": image_path,
"label": label_path,
"name": name,
})
return files
def convert_label(self, label, inverse=False):
temp = label.copy()
        # inverse=True maps training IDs back to raw Cityscapes IDs (used when saving predictions)
if inverse:
for v, k in self.label_mapping.items():
label[temp == k] = v
else:
for k, v in self.label_mapping.items():
                # e.g. (k, v) = (-1, ignore_label)
label[temp == k] = v
return label
def multi_scale_inference(self, model, image, scales=[1], flip=False):
batch, _, ori_height, ori_width = image.size()
assert batch == 1, "only supporting batchsize 1."
image = image.numpy()[0].transpose((1, 2, 0)).copy()
        # stride equal to the full crop size, i.e. non-overlapping windows
        stride_h = int(self.crop_size[0] * 1.0)
        stride_w = int(self.crop_size[1] * 1.0)
final_pred = torch.zeros([1, self.num_classes,
ori_height, ori_width]).cuda()
for scale in scales:
new_img = self.multi_scale_aug(image=image,
rand_scale=scale,
rand_crop=False)
height, width = new_img.shape[:-1]
if scale <= 1.0:
new_img = new_img.transpose((2, 0, 1))
new_img = np.expand_dims(new_img, axis=0)
new_img = torch.from_numpy(new_img)
preds = self.inference(model, new_img, flip)
preds = preds[:, :, 0:height, 0:width]
else:
new_h, new_w = new_img.shape[:-1]
                rows = int(np.ceil(1.0 * (new_h -
                                          self.crop_size[0]) / stride_h)) + 1
                cols = int(np.ceil(1.0 * (new_w -
                                          self.crop_size[1]) / stride_w)) + 1
preds = torch.zeros([1, self.num_classes,
new_h, new_w]).cuda()
count = torch.zeros([1, 1, new_h, new_w]).cuda()
for r in range(rows):
for c in range(cols):
h0 = r * stride_h
w0 = c * stride_w
h1 = min(h0 + self.crop_size[0], new_h)
w1 = min(w0 + self.crop_size[1], new_w)
h0 = max(int(h1 - self.crop_size[0]), 0)
w0 = max(int(w1 - self.crop_size[1]), 0)
crop_img = new_img[h0:h1, w0:w1, :]
crop_img = crop_img.transpose((2, 0, 1))
crop_img = np.expand_dims(crop_img, axis=0)
crop_img = torch.from_numpy(crop_img)
pred = self.inference(model, crop_img, flip)
preds[:, :, h0:h1, w0:w1] += pred[:, :, 0:h1 - h0, 0:w1 - w0]
count[:, :, h0:h1, w0:w1] += 1
preds = preds / count
preds = preds[:, :, :height, :width]
            preds = F.interpolate(preds, (ori_height, ori_width),
                                  mode='bilinear')
final_pred += preds
return final_pred
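    # classic bit-interleaving palette generator (the same scheme PASCAL VOC tooling uses)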
def get_palette(self, n):
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
def save_pred(self, preds, sv_path, name):
palette = self.get_palette(256)
preds = np.asarray(np.argmax(preds, axis=1), dtype=np.uint8)
for i in range(preds.shape[0]):
pred = self.convert_label(preds[i], inverse=True)
save_img = Image.fromarray(pred)
save_img.putpalette(palette)
save_img.save(os.path.join(sv_path, name[i] + '.png'))
For a detailed walkthrough of these functions, see:
https://blog.csdn.net/weixin_43723625/article/details/106070079
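To make the label mapping concrete: convert_label rewrites every raw Cityscapes ID into one of the 19 training IDs, and everything unmapped becomes ignore_label. Below is a small self-contained sketch using a made-up 2×3 label patch and an excerpt of the mapping (the array values are invented for illustration):

import numpy as np

ignore_label = 255
# excerpt of the full label_mapping defined in the class above
label_mapping = {0: ignore_label, 7: 0, 8: 1, 24: 11, 26: 13, 33: 18}

# hypothetical raw label patch, as stored in a *_labelIds.png file
raw = np.array([[7, 8, 26],
                [0, 33, 24]], dtype=np.uint8)

train_ids = raw.copy()
for k, v in label_mapping.items():
    train_ids[raw == k] = v
print(train_ids)
# [[  0   1  13]
#  [255  18  11]]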
Building the Dataset and DataLoader:
config.train_dataset = dataset.Cityscapes(
root= os.path.join(config.project_path, config.DATASET["root"]),
list_path=config.DATASET["train_set"],
    num_samples=None,
    # how many samples to draw from the full training set (None = all of them)
    num_classes=19,
    # number of segmentation classes
    multi_scale=True,
    # scale first, then crop
    flip=True,
    # random horizontal flip
    ignore_label=255,
    # merged/ignored classes are written into the label as the value 255
    crop_size=[512, 1024],
    # size of the output image
    scale_factor=16,
    # controls the random pre-crop scaling: scale = 0.5 + random.randint(0, scale_factor) / 10.0
    downsample_rate=1)
    # downsampling factor for the label along the x and y axes
config.trainloader = torch.utils.data.DataLoader(
config.train_dataset,
batch_size = 1,
# shuffle=True,
shuffle = False,
num_workers = 0)
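With the DataLoader in place, pulling a batch looks like the following sketch; the shapes assume the configuration above (crop_size 512×1024, batch_size 1), and the loop body is a placeholder rather than real training code:

# numpy arrays returned by __getitem__ are collated into torch tensors automatically
for image, label, size, name in config.trainloader:
    # image: (1, 3, 512, 1024) float tensor; label: (1, 512, 1024) int tensor
    print(name[0], image.shape, label.shape)
    break  # inspect just the first batch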