Preface
On June 30, 2017, the YOLO source code on GitHub was updated and many files were moved around. This caused me a lot of trouble when training YOLO v2 on my own data (most of the fixes posted online are not entirely satisfactory). That experience made it clear to me how much understanding the source code helps when training on your own data, so I decided to give a quick walkthrough of the YOLO v2 code to make the training procedure described on the official YOLO site easier to follow. (Since this is only a code walkthrough, I will not explain the source line by line.)
Analysis
Let's start the analysis from YOLO's training command (the YOLO source code is written in C):
./darknet detector train cfg/voc.data cfg/yolo-voc.cfg darknet19_448.conv.23

From this command we can read off the values that end up in argv[] in darknet's main function: argv[0] -> darknet, argv[1] -> detector, argv[2] -> train, and so on for the remaining arguments. We can also see that main must live in examples/darknet.c, so let's look at the main function:
int main(int argc, char **argv)
{
    //test_resize("data/bad.jpg");
    //test_box();
    //test_convolutional_layer();
    if(argc < 2){
        fprintf(stderr, "usage: %s <function>\n", argv[0]);
        return 0;
    }
    gpu_index = find_int_arg(argc, argv, "-i", 0);
    if(find_arg(argc, argv, "-nogpu")) {
        gpu_index = -1;
    }

#ifndef GPU
    gpu_index = -1;
#else
    if(gpu_index >= 0){
        cuda_set_device(gpu_index);
    }
#endif

    if (0 == strcmp(argv[1], "average")) average(argc, argv);
    else if (0 == strcmp(argv[1], "yolo")) run_yolo(argc, argv);
    else if (0 == strcmp(argv[1], "voxel")) run_voxel(argc, argv);
    else if (0 == strcmp(argv[1], "super")) run_super(argc, argv);
    else if (0 == strcmp(argv[1], "lsd")) run_lsd(argc, argv);
    else if (0 == strcmp(argv[1], "detector")) run_detector(argc, argv);
    else if (0 == strcmp(argv[1], "detect")){
        float thresh = find_float_arg(argc, argv, "-thresh", .24);
        char *filename = (argc > 4) ? argv[4]: 0;
        char *outfile = find_char_arg(argc, argv, "-out", 0);
        int fullscreen = find_arg(argc, argv, "-fullscreen");
        test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, .5, outfile, fullscreen);
    }
    else if (0 == strcmp(argv[1], "cifar")) run_cifar(argc, argv);
    else if (0 == strcmp(argv[1], "go")) run_go(argc, argv);
    else if (0 == strcmp(argv[1], "rnn")) run_char_rnn(argc, argv);
    else if (0 == strcmp(argv[1], "vid")) run_vid_rnn(argc, argv);
    else if (0 == strcmp(argv[1], "coco")) run_coco(argc, argv);
    else if (0 == strcmp(argv[1], "classify")) predict_classifier("cfg/imagenet1k.data", argv[2], argv[3], argv[4], 5);
    else if (0 == strcmp(argv[1], "classifier")) run_classifier(argc, argv);
    else if (0 == strcmp(argv[1], "regressor")) run_regressor(argc, argv);
    else if (0 == strcmp(argv[1], "segmenter")) run_segmenter(argc, argv);
    else if (0 == strcmp(argv[1], "art")) run_art(argc, argv);
    else if (0 == strcmp(argv[1], "tag")) run_tag(argc, argv);
    else if (0 == strcmp(argv[1], "compare")) run_compare(argc, argv);
    else if (0 == strcmp(argv[1], "dice")) run_dice(argc, argv);
    else if (0 == strcmp(argv[1], "writing")) run_writing(argc, argv);
    else if (0 == strcmp(argv[1], "3d")) composite_3d(argv[2], argv[3], argv[4], (argc > 5) ? atof(argv[5]) : 0);
    else if (0 == strcmp(argv[1], "test")) test_resize(argv[2]);
    else if (0 == strcmp(argv[1], "captcha")) run_captcha(argc, argv);
    else if (0 == strcmp(argv[1], "nightmare")) run_nightmare(argc, argv);
    else if (0 == strcmp(argv[1], "rgbgr")) rgbgr_net(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "reset")) reset_normalize_net(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "denormalize")) denormalize_net(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "statistics")) statistics_net(argv[2], argv[3]);
    else if (0 == strcmp(argv[1], "normalize")) normalize_net(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "rescale")) rescale_net(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "ops")) operations(argv[2]);
    else if (0 == strcmp(argv[1], "speed")) speed(argv[2], (argc > 3 && argv[3]) ? atoi(argv[3]) : 0);
    else if (0 == strcmp(argv[1], "oneoff")) oneoff(argv[2], argv[3], argv[4]);
    else if (0 == strcmp(argv[1], "oneoff2")) oneoff2(argv[2], argv[3], argv[4], atoi(argv[5]));
    else if (0 == strcmp(argv[1], "partial")) partial(argv[2], argv[3], argv[4], atoi(argv[5]));
    else if (0 == strcmp(argv[1], "average")) average(argc, argv);
    else if (0 == strcmp(argv[1], "visualize")) visualize(argv[2], (argc > 3) ? argv[3] : 0);
    else if (0 == strcmp(argv[1], "mkimg")) mkimg(argv[2], argv[3], atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), argv[7]);
    else if (0 == strcmp(argv[1], "imtest")) test_resize(argv[2]);
    else {
        fprintf(stderr, "Not an option: %s\n", argv[1]);
    }
    return 0;
}

It is easy to see that main is essentially one big check on argv[1]: depending on its value, a different subprogram is started.
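A quick aside on the -i / -nogpu handling above: darknet's find_arg / find_int_arg helpers (they live in src/utils.c) scan argv for a flag, read its value, and shift the consumed tokens out of argv so the positional arguments keep their indices. The snippet below is only a minimal sketch of that idea for illustration, not the real implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: remove argv[index] by shifting everything after it left by one. */
static void del_arg_sketch(int argc, char **argv, int index)
{
    int i;
    for(i = index; i < argc - 1; ++i) argv[i] = argv[i + 1];
    argv[i] = 0;
}

/* Sketch: look for "-flag value", return the value, and strip both tokens. */
static int find_int_arg_sketch(int argc, char **argv, const char *arg, int def)
{
    int i;
    for(i = 0; i < argc - 1; ++i){
        if(!argv[i]) continue;
        if(0 == strcmp(argv[i], arg)){
            def = atoi(argv[i + 1]);
            del_arg_sketch(argc, argv, i);   /* remove the flag  */
            del_arg_sketch(argc, argv, i);   /* remove its value */
            break;
        }
    }
    return def;
}

int main(int argc, char **argv)
{
    int gpu_index = find_int_arg_sketch(argc, argv, "-i", 0);
    printf("gpu index: %d, first positional arg: %s\n", gpu_index, argc > 1 ? argv[1] : "(none)");
    return 0;
}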
Following our training command, argv[1] = detector, so the function that gets called is run_detector, which sits at the end of examples/detector.c. Let's take a look at it:

void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .24);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 2);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
}

Here run_detector's main job is, once again, to execute a different function depending on the values in argv[]. The GPU handling, thresholds and so on can be ignored for now; what matters most is argv[2], whose value decides which function runs. The functions test_detector, train_detector, validate_detector and the rest are all defined in detector.c, and their names alone tell you what they do.
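As a side note, this dispatch is also where the other commands from the YOLO documentation end up. Assuming the file names from our example, and whatever weight files training has written into backup/, invocations like the following exercise the test, recall and valid branches above. Treat them as illustrations of the argument order (datacfg = argv[3], cfg = argv[4], weights = argv[5], optional image = argv[6]) rather than commands to paste verbatim:

./darknet detector test cfg/voc.data cfg/yolo-voc.cfg backup/yolo-voc_final.weights data/dog.jpg
./darknet detector recall cfg/voc.data cfg/yolo-voc.cfg backup/yolo-voc_final.weights
./darknet detector valid cfg/voc.data cfg/yolo-voc.cfg backup/yolo-voc_final.weights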
Still following our training command, argv[2] = train, so train_detector is called. Here it is (note: this is a version I have modified somewhat, not the original code); the parts to pay attention to are the read_data_cfg call, the train_images and backup_directory options it reads, and the load_network call:
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "scripts/train.txt");   // path to the training image list
    char *backup_directory = option_find_str(options, "backup", "/backup/");       // directory where trained weights are saved
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network *nets = calloc(ngpus, sizeof(network));

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);   // load the network structure and weights
        nets[i].learning_rate *= ngpus;
    }
    srand(time(0));
    network net = nets[0];

    int imgs = net.batch * net.subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    data train, buffer;

    layer l = net.layers[net.n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    args.threads = 8;

    args.angle = net.angle;
    args.exposure = net.exposure;
    args.saturation = net.saturation;
    args.hue = net.hue;

    pthread_t load_thread = load_data(args);
    clock_t time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net.max_batches){
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net.max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            for(i = 0; i < ngpus; ++i){
                resize_network(nets + i, dim, dim);
            }
            net = nets[0];
        }
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
        int k;
        for(k = 0; k < l.max_boxes; ++k){
            box b = float_to_box(train.y.vals[10] + 1 + k*5);
            if(!b.x) break;
            printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
        }
        */
        /*
        int zz;
        for(zz = 0; zz < train.X.cols; ++zz){
            image im = float_to_image(net.w, net.h, 3, train.X.vals[zz]);
            int k;
            for(k = 0; k < l.max_boxes; ++k){
                box b = float_to_box(train.y.vals[zz] + k*5);
                printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
                draw_bbox(im, b, 1, 1,0,0);
            }
            show_image(im, "truth11");
            cvWaitKey(0);
            save_image(im, "truth11");
        }
        */

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        time=clock();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
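A few numbers in this loop are worth working out explicitly (based on the cfg shown further below and on my reading of darknet's parser, so take them as illustrative rather than authoritative). imgs = net.batch * net.subdivisions * ngpus; since the cfg parser divides the configured batch by subdivisions, with batch=64, subdivisions=8 and one GPU this comes to 8 * 8 * 1 = 64 images per printed iteration, i.e. exactly one cfg-level batch. Multi-scale training is the l.random branch: when random=1 in the [region] layer, every 10 batches the input size is reset to dim = (rand() % 10 + 10) * 32, a random multiple of 32 between 320 and 608, and it is pinned to 608 for the last 200 batches before max_batches. Finally, the checkpoint logic writes <base>.backup (here yolo-voc.backup) into backup_directory every 1000 iterations, numbered <base>_<iter>.weights files every 100 iterations up to iteration 1000 and every 10000 iterations after that, and <base>_final.weights once max_batches is reached.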
As run_detector shows, the datacfg argument passed to read_data_cfg is simply argv[3], which in our example is voc.data.
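For intuition, read_data_cfg (in src/option_list.c) just parses that file into a list of key = value pairs, and option_find_str / option_find_int look a key up and fall back to a default when it is missing. The fragment below is a much-simplified sketch of that idea, not the real darknet code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_OPTS 64

typedef struct { char key[64]; char val[256]; } kv;

/* Sketch: read "key = value" lines from something like voc.data. */
static int read_data_cfg_sketch(const char *filename, kv *opts)
{
    FILE *fp = fopen(filename, "r");
    if(!fp) return 0;
    char line[512];
    int n = 0;
    while(n < MAX_OPTS && fgets(line, sizeof(line), fp)){
        char *eq = strchr(line, '=');
        if(!eq || line[0] == '#') continue;        /* skip comments and junk */
        *eq = 0;
        sscanf(line, " %63s", opts[n].key);        /* trim spaces around key */
        sscanf(eq + 1, " %255s", opts[n].val);     /* and around the value   */
        ++n;
    }
    fclose(fp);
    return n;
}

/* Sketch: look a key up, or return the supplied default. */
static const char *option_find_str_sketch(kv *opts, int n, const char *key, const char *def)
{
    int i;
    for(i = 0; i < n; ++i) if(0 == strcmp(opts[i].key, key)) return opts[i].val;
    return def;
}

int main(void)
{
    kv opts[MAX_OPTS];
    int n = read_data_cfg_sketch("cfg/voc.data", opts);
    printf("train list: %s\n", option_find_str_sketch(opts, n, "train", "scripts/train.txt"));
    printf("backup dir: %s\n", option_find_str_sketch(opts, n, "backup", "/backup/"));
    return 0;
}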
train_images specifies the path to the list of images you want to train on.
backup_directory specifies the path where the trained weights are saved.
load_network loads the network structure and parameters to be trained; from run_detector we can see that its cfgfile argument is argv[4], which in our example is yolo-voc.cfg.
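One more note on the weights argument: darknet19_448.conv.23 in our command is not a full detection model but the pretrained convolutional weights of Darknet-19 (trained on ImageNet) up to layer 23, used only to initialise training. You can download it from the YOLO site, or, if I remember the official instructions correctly, extract it yourself from darknet19_448.weights using the partial branch we saw in main:

./darknet partial cfg/darknet19_448.cfg darknet19_448.weights darknet19_448.conv.23 23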
Let's first take a look at cfg/voc.data (note: this is my modified version, not the original):
classes= 2
train = /home/iair339-04/darknet/scripts/train.txt
valid = /home/iair339-04/darknet/scripts/2007_test.txt
names = data/kitti.names
backup = backup

As you can see, voc.data is simply where the number of classes (classes), the training list (train), the validation list (valid), the class-name file (names) and the backup directory for weights (backup) are specified. Nothing complicated.
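For reference, the train.txt that voc.data points at is just a plain list of image paths, one per line; the paths below are made up for illustration:

/home/iair339-04/darknet/scripts/VOCdevkit/VOC2007/JPEGImages/000001.jpg
/home/iair339-04/darknet/scripts/VOCdevkit/VOC2007/JPEGImages/000002.jpg

Roughly speaking, for each image darknet derives the label file path by replacing JPEGImages (or images) with labels and the image extension with .txt, which is exactly the layout that scripts/voc_label.py generates. Each line of a label file is one object in the form "class x_center y_center width height", with coordinates normalised to the range 0 to 1, for example:

0 0.500 0.500 0.450 0.600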
Next, let's look at the yolo-voc.cfg file (note: also modified):
The [net] section at the top sets the network's training hyperparameters; everything after it defines the YOLO v2 network structure itself.
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
height=416
width=416
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 80200
policy=steps
steps=40000,60000
scales=.1,.1

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

#######

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[route]
layers=-9

[convolutional]
batch_normalize=1
size=1
stride=1
pad=1
filters=64
activation=leaky

[reorg]
stride=2

[route]
layers=-1,-4

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=35    # modified here
activation=linear

[region]
anchors = 1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071
bias_match=1
classes=2    # modified here: number of classes
coords=4
num=5
softmax=1
jitter=.3
rescore=1
object_scale=5
noobject_scale=1
class_scale=1
coord_scale=1
absolute=1
thresh = .6
random=1
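A quick sanity check on the two modified lines: in the YOLO v2 region layer, the 1x1 convolution right before [region] must output num * (classes + coords + 1) filters, one predictor per anchor box with 4 coordinates, 1 objectness score and the class scores. With the values above:

filters = num * (classes + coords + 1) = 5 * (2 + 4 + 1) = 35

So whenever you change classes for your own data, you must also change filters in that last convolutional layer to match.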
Conclusion
This was only a quick walk through how YOLO v2 works, driven by the training command, mainly to make training on your own data less of a black box. If you want to understand the YOLO source code in detail, you will still need to read it carefully yourself.