Overview
Much of the code in this post is copied from https://blog.csdn.net/TYUT_xiaoming/article/details/102480016; it mainly records the problems I ran into in practice and my own solutions. Follow the steps below and you should be able to finish this task painlessly~
Step 1 Prepare
The YOLO-v3 code is forked from https://github.com/eriklindernoren/PyTorch-YOLOv3
The COCO dataset needs to be downloaded yourself.
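As a minimal sanity-check sketch (the paths are my assumption and simply mirror the dataDir layout expected by the extraction script in Step 2), you can confirm the dataset sits where the scripts below expect it:
import os
dataDir = '/coco/'  # assumed layout: annotations/ and images/<split>/ under this folder
for split in ['train2014', 'val2014']:
    ann = os.path.join(dataDir, 'annotations', 'instances_%s.json' % split)
    imgs = os.path.join(dataDir, 'images', split)
    print(split, 'annotation json found:', os.path.isfile(ann), '| image folder found:', os.path.isdir(imgs))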
Step 2 Extract images and annotations
First, run the code below to extract the images of the classes you need from the original COCO dataset. The places you need to modify are:
- savepath
- datasets_list
- classes_names
- dataDir
from pycocotools.coco import COCO
import os
import shutil
from tqdm import tqdm
import skimage.io as io
import matplotlib.pyplot as plt
import cv2
from PIL import Image, ImageDraw
#the path you want to save your results for coco to voc
savepath="/coco_class/"
img_dir=savepath+'images/val2014/'
anno_dir=savepath+'Annotations/val2014/'
# datasets_list=['train2014', 'val2014']
# datasets_list=['train2014']
datasets_list=['val2014']
classes_names = ["person","bicycle","car","motorbike", "bus", "truck"]
#Store annotations and train2014/val2014/... in this folder
dataDir= '/coco/'
headstr = """
<annotation>
<folder>VOC</folder>
<filename>%s</filename>
<source>
<database>My Database</database>
<annotation>COCO</annotation>
<image>flickr</image>
<flickrid>NULL</flickrid>
</source>
<size>
<width>%d</width>
<height>%d</height>
<depth>%d</depth>
</size>
<segmented>0</segmented>
"""
objstr = """
<object>
<name>%s</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>%d</xmin>
<ymin>%d</ymin>
<xmax>%d</xmax>
<ymax>%d</ymax>
</bndbox>
</object>
"""
tailstr = '''
</annotation>
'''
# if the dir already exists, delete and recreate it; otherwise create it (including parent dirs)
def mkr(path):
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
mkr(img_dir)
mkr(anno_dir)
def id2name(coco):
    classes = dict()
    for cls in coco.dataset['categories']:
        classes[cls['id']] = cls['name']
    return classes
def write_xml(anno_path, head, objs, tail):
    f = open(anno_path, "w")
    f.write(head)
    for obj in objs:
        f.write(objstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
    f.write(tail)
    f.close()
def save_annotations_and_imgs(coco, dataset, filename, objs):
    #eg:COCO_train2014_000000196610.jpg-->COCO_train2014_000000196610.xml
    anno_path = anno_dir + filename[:-3] + 'xml'
    img_path = dataDir + 'images/' + dataset + '/' + filename
    dst_imgpath = img_dir + filename
    print(img_path)
    img = cv2.imread(img_path)
    if (img.shape[2] == 1):
        print(filename + " not a RGB image")
        return
    shutil.copy(img_path, dst_imgpath)
    head = headstr % (filename, img.shape[1], img.shape[0], img.shape[2])
    tail = tailstr
    write_xml(anno_path, head, objs, tail)
def showimg(coco, dataset, img, classes, cls_id, show=True):
    global dataDir
    I = Image.open('%s/%s/%s/%s' % (dataDir, 'images', dataset, img['file_name']))
    #Get the annotated information by ID
    annIds = coco.getAnnIds(imgIds=img['id'], catIds=cls_id, iscrowd=None)
    anns = coco.loadAnns(annIds)
    objs = []
    for ann in anns:
        class_name = classes[ann['category_id']]
        if class_name in classes_names:
            print(class_name)
            if 'bbox' in ann:
                #COCO bbox is [x, y, width, height]; convert to VOC-style [xmin, ymin, xmax, ymax]
                bbox = ann['bbox']
                xmin = int(bbox[0])
                ymin = int(bbox[1])
                xmax = int(bbox[2] + bbox[0])
                ymax = int(bbox[3] + bbox[1])
                obj = [class_name, xmin, ymin, xmax, ymax]
                objs.append(obj)
                draw = ImageDraw.Draw(I)
                draw.rectangle([xmin, ymin, xmax, ymax])
    if show:
        plt.figure()
        plt.axis('off')
        plt.imshow(I)
        plt.show()
    return objs
for dataset in datasets_list:
    #./COCO/annotations/instances_train2014.json
    annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataset)
    #COCO API for initializing annotated data
    coco = COCO(annFile)
    '''
    When the COCO object is created, the following information will be output:
    loading annotations into memory...
    Done (t=0.81s)
    creating index...
    index created!
    So far, the JSON script has been parsed and the images are associated with the corresponding annotated data.
    '''
    #show all classes in coco
    classes = id2name(coco)
    print(classes)
    #[1, 2, 3, 4, 6, 8]
    classes_ids = coco.getCatIds(catNms=classes_names)
    print(classes_ids)
    for cls in classes_names:
        #Get ID number of this class
        cls_id = coco.getCatIds(catNms=[cls])
        img_ids = coco.getImgIds(catIds=cls_id)
        print(cls, len(img_ids))
        for imgId in tqdm(img_ids):
            img = coco.loadImgs(imgId)[0]
            filename = img['file_name']
            objs = showimg(coco, dataset, img, classes, classes_ids, show=False)
            print(objs)
            save_annotations_and_imgs(coco, dataset, filename, objs)
This step produces the extracted images folder and the Annotations (.xml) folder.
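As a quick sanity check (a minimal sketch I added; the paths assume the savepath used above), you can verify that every extracted image has a matching XML file and vice versa:
import os
img_dir = '/coco_class/images/val2014/'
anno_dir = '/coco_class/Annotations/val2014/'
imgs = {f[:-4] for f in os.listdir(img_dir) if f.endswith('.jpg')}
xmls = {f[:-4] for f in os.listdir(anno_dir) if f.endswith('.xml')}
print('images:', len(imgs), 'annotations:', len(xmls))
print('images without xml:', len(imgs - xmls))
print('xml without image:', len(xmls - imgs))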
Step 3 Filter out bad extractions
With the code above, when extracting multiple classes some of the generated XML files end up with no object element at all; this is the main reason this post exists.
I took a very brute-force approach: simply delete the annotations and images that contain none of the classes we want. Run the code below:
import os
Dir = './coco_class/Annotations/val2014'
ImageDir = './coco_class/images/val2014'
cnt = 0
for i, file_name in enumerate(os.listdir(Dir)):
    fsize = os.path.getsize(os.path.join(Dir, file_name))
    if fsize == 410:
        print('removing {} of size {}'.format(file_name, fsize))
        os.remove(os.path.join(ImageDir, file_name[:-3] + 'jpg'))
        os.remove(os.path.join(Dir, file_name))
        cnt += 1
print('removed {} files'.format(cnt))
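A note on the magic number: 410 bytes corresponds to an XML file that contains only the header and tail (no object entries) for this template and the val2014 filename length; train2014 filenames are slightly longer, so the empty-file size will differ there. If you would rather not depend on file size at all, a more robust sketch (my own suggestion, not from the original post) is to parse each XML and delete the pair when no object element is present:
import os
import xml.etree.ElementTree as ET
Dir = './coco_class/Annotations/val2014'
ImageDir = './coco_class/images/val2014'
cnt = 0
for file_name in os.listdir(Dir):
    xml_path = os.path.join(Dir, file_name)
    # an extraction with no usable boxes has no <object> element at all
    if ET.parse(xml_path).find('object') is None:
        os.remove(os.path.join(ImageDir, file_name[:-3] + 'jpg'))
        os.remove(xml_path)
        cnt += 1
print('removed {} files'.format(cnt))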
OK, with that, the image filtering is properly done.
Step 4 Convert .xml to .txt label files
In the code below, modify:
- classes
- data_path
- list_file
- in_file
- out_file
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
classes = ['person','bicycle','car','motorbike', 'bus', 'truck']
#classes = ['truck']
def convert(size, box):
    # box is (xmin, xmax, ymin, ymax); returns normalized (x_center, y_center, w, h)
    dw = 1./(size[0])
    dh = 1./(size[1])
    x = (box[0] + box[1])/2.0 - 1
    y = (box[2] + box[3])/2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x, y, w, h)

def convert_annotation(image_id):
    in_file = open('/coco_class/Annotations/train2014/%s.xml' % (image_id))
    out_file = open('/coco_class/labels/train2014/%s.txt' % (image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        print(cls)
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    in_file.close()
    out_file.close()
data_path = '/coco_class/images/train2014'
img_names = os.listdir(data_path)
list_file = open('/coco_class/class_train.txt', 'w')
for img_name in img_names:
    if not os.path.exists('/coco_class/labels/train2014'):
        os.makedirs('/coco_class/labels/train2014')
    list_file.write('/coco_class/images/train2014/%s\n' % img_name)
    image_id = img_name[:-4]
    convert_annotation(image_id)
list_file.close()
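As a quick illustration of what convert produces (a hypothetical example I added, not part of the original script): for a 640x480 image and a VOC box with xmin=48, xmax=368, ymin=80, ymax=240, the output is the box center and size normalized by the image width and height (note the one-pixel shift on the center, the "- 1" in convert, which the numbers below reflect):
# hypothetical example: 640x480 image, box (xmin=48, xmax=368, ymin=80, ymax=240)
print(convert((640, 480), (48, 368, 80, 240)))
# -> (0.3234375, 0.33125, 0.5, 0.3333333333333333)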
At this point we have finished carving out our subset of the COCO dataset; next up is the YOLO part~
Step 5 Modify the YOLO-V3 code
Much of this part is copied from https://cloud.tencent.com/developer/ask/210396
- Modify (or back up a copy of, then modify) data/coco.names: delete every class other than the ones you want to detect.
- Modify the cfg file (e.g. config/yolov3.cfg): on lines 610, 696 and 783, change classes from 80 to the number of classes you want to detect.
- On lines 603, 689 and 776 of the cfg file, change filters from 255 to (classes + 5) x 3; I train 6 classes, so (6 + 5) x 3 = 33.
- Modify /config/coco.data: point train and valid to the class_train.txt and class_valid.txt generated in the coco_class folder above, and update the class count as well (see the sketch after this list).
- Run train.py or detect.py.
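For reference, the edited files end up looking roughly like this. This is a sketch based on the layout of the eriklindernoren repo; key names in coco.data and the exact cfg line numbers may differ slightly between versions, so check against your own copy. config/coco.data:
classes=6
train=/coco_class/class_train.txt
valid=/coco_class/class_valid.txt
names=data/coco.names
backup=backup/
and each of the three [yolo] heads in config/yolov3.cfg, together with the [convolutional] layer right before it:
[convolutional]
# ...other keys unchanged...
filters=33    # (classes + 5) * 3 = (6 + 5) * 3
[yolo]
# ...other keys unchanged...
classes=6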
Step 6 A small bug hit during training
During training I ran into a "CUDA error: device-side assert triggered" bug; a solution can be found in the official YOLOv3 repo issues: https://github.com/eriklindernoren/PyTorch-YOLOv3/issues/157
Modify utils/utils.py as below; the added lines clamp the grid indices gi/gj into [0, nG - 1] so that boxes falling outside the feature map no longer index out of bounds:
b, target_labels = target[:, :2].long().t()
gx, gy = gxy.t()
gw, gh = gwh.t()
gi, gj = gxy.long().t()
########## TODO(arthur77wang):
gi[gi < 0] = 0
gj[gj < 0] = 0
gi[gi > nG - 1] = nG - 1
gj[gj > nG - 1] = nG - 1
###################
# Set masks
obj_mask[b, best_n, gj, gi] = 1
noobj_mask[b, best_n, gj, gi] = 0
I don't work in this area; I just ran into these problems while doing a project, so I'm posting them here to share, in the hope that it saves you some detours. If you have questions, feel free to contact me at yxzhangxmu@163.com~