(Repost) Running YOLOv3 and saving the detection video (including webcam)

Overview

Original article: https://blog.csdn.net/sinat_33718563/article/details/79964758

New code: works. If you don't want to fiddle with modifying the old code, you can pull the code directly from the new address below. (Be sure to read the Readme.)

-------------------- Update 2018-07-03 --------------------

The previously uploaded project had a few small issues; the fully updated project and instructions are at the link below.

https://github.com/RongSong1993/YOLOv3-SaveVideo-New/tree/master

-------------------- Update 2018-07-03 --------------------

 

Old code: also works, but the saved video flickers. A workaround has since appeared in the comments; feel free to try it (scroll to the end of this post).

Full project for saving YOLOv3 detection video: https://github.com/RongSong1993/YOLOv3_SaveVideo

Recently I set up and ran the YOLOv3 network. The official project page is https://pjreddie.com/darknet/yolo/; the whole setup is straightforward, just follow that site. However, when the official project runs detection on a local video or on video captured from a webcam, it does not save the result by default. This post is therefore mainly about how to save the video output of YOLOv3 detection. Leave a comment if you have questions; I have not studied the algorithm in depth, this is mainly a walkthrough of the workflow.
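For reference, the detection demo is launched from the darknet root roughly as shown on the official page (the cfg and weights paths below assume the default layout and a downloaded yolov3.weights):

          ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights <video file>   # local video
          ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights                 # webcam (add -c <num> to pick another camera)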

Assuming your project lives at ./darknet, two files mainly need to be changed: demo.c and image.c, both under ./darknet/src/.

(1) First, add the definition of the save_video function to image.c. The code is below (the original post also includes a screenshot showing where it goes in image.c):


 
 
void save_video(image p, CvVideoWriter *mVideoWriter)
{
    image copy = copy_image(p);
    if(p.c == 3) rgbgr_image(copy);
    int x,y,k;

    IplImage *disp = cvCreateImage(cvSize(p.w,p.h), IPL_DEPTH_8U, p.c);
    int step = disp->widthStep;
    for(y = 0; y < p.h; ++y){
        for(x = 0; x < p.w; ++x){
            for(k = 0; k < p.c; ++k){
                disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(copy,x,y,k)*255);
            }
        }
    }
    cvWriteFrame(mVideoWriter, disp);
    cvReleaseImage(&disp);
    free_image(copy);
}
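One detail the original post does not spell out: demo.c calls save_video, so a prototype also has to be visible there. A minimal sketch, assuming you expose it through ./darknet/src/image.h (this placement is my assumption, not stated in the original; the OpenCV C types are already available in that header's OPENCV section in the stock darknet sources):

#ifdef OPENCV
/* assumed addition to image.h: prototype for the new helper defined in image.c */
void save_video(image p, CvVideoWriter *mVideoWriter);
#endif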

 


(2) Then modify demo.c. Since there are changes in several places, the complete demo.c is posted here. The code between each pair of //*********rs20180415*********** markers is the newly added content; compare it against your own copy. This includes setting the name and frame rate of the output detection video.


 
 
  1. #include "network.h"
  2. #include "detection_layer.h"
  3. #include "region_layer.h"
  4. #include "cost_layer.h"
  5. #include "utils.h"
  6. #include "parser.h"
  7. #include "box.h"
  8. #include "image.h"
  9. #include "demo.h"
  10. #include <sys/time.h>
  11. #define DEMO 1
  12. //*********rs20180415***********
  13. #define SAVEVIDEO
  14. //*********rs20180415***********
  15. #ifdef OPENCV
  16. //*********rs20180415***********
  17. #ifdef SAVEVIDEO
  18. static CvVideoWriter *mVideoWriter;
  19. #endif
  20. //*********rs20180415***********
  21. static char **demo_names;
  22. static image **demo_alphabet;
  23. static int demo_classes;
  24. static network *net;
  25. static image buff [ 3];
  26. static image buff_letter[ 3];
  27. static int buff_index = 0;
  28. static CvCapture * cap;
  29. static IplImage * ipl;
  30. static float fps = 0;
  31. static float demo_thresh = 0;
  32. static float demo_hier = .5;
  33. static int running = 0;
  34. static int demo_frame = 3;
  35. static int demo_index = 0;
  36. static float **predictions;
  37. static float *avg;
  38. static int demo_done = 0;
  39. static int demo_total = 0;
  40. double demo_time;
  41. detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
  42. int size_network(network *net)
  43. {
  44. int i;
  45. int count = 0;
  46. for(i = 0; i < net->n; ++i){
  47. layer l = net->layers[i];
  48. if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
  49. count += l.outputs;
  50. }
  51. }
  52. return count;
  53. }
  54. void remember_network(network *net)
  55. {
  56. int i;
  57. int count = 0;
  58. for(i = 0; i < net->n; ++i){
  59. layer l = net->layers[i];
  60. if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
  61. memcpy(predictions[demo_index] + count, net->layers[i].output, sizeof( float) * l.outputs);
  62. count += l.outputs;
  63. }
  64. }
  65. }
  66. detection *avg_predictions(network *net, int *nboxes)
  67. {
  68. int i, j;
  69. int count = 0;
  70. fill_cpu(demo_total, 0, avg, 1);
  71. for(j = 0; j < demo_frame; ++j){
  72. axpy_cpu(demo_total, 1./demo_frame, predictions[j], 1, avg, 1);
  73. }
  74. for(i = 0; i < net->n; ++i){
  75. layer l = net->layers[i];
  76. if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
  77. memcpy(l.output, avg + count, sizeof( float) * l.outputs);
  78. count += l.outputs;
  79. }
  80. }
  81. detection *dets = get_network_boxes(net, buff[ 0].w, buff[ 0].h, demo_thresh, demo_hier, 0, 1, nboxes);
  82. return dets;
  83. }
  84. void *detect_in_thread(void *ptr)
  85. {
  86. running = 1;
  87. float nms = .4;
  88. layer l = net->layers[net->n -1];
  89. float *X = buff_letter[(buff_index+ 2)% 3].data;
  90. network_predict(net, X);
  91. /*
  92. if(l.type == DETECTION){
  93. get_detection_boxes(l, 1, 1, demo_thresh, probs, boxes, 0);
  94. } else */
  95. remember_network(net);
  96. detection *dets = 0;
  97. int nboxes = 0;
  98. dets = avg_predictions(net, &nboxes);
  99. /*
  100. int i,j;
  101. box zero = {0};
  102. int classes = l.classes;
  103. for(i = 0; i < demo_detections; ++i){
  104. avg[i].objectness = 0;
  105. avg[i].bbox = zero;
  106. memset(avg[i].prob, 0, classes*sizeof(float));
  107. for(j = 0; j < demo_frame; ++j){
  108. axpy_cpu(classes, 1./demo_frame, dets[j][i].prob, 1, avg[i].prob, 1);
  109. avg[i].objectness += dets[j][i].objectness * 1./demo_frame;
  110. avg[i].bbox.x += dets[j][i].bbox.x * 1./demo_frame;
  111. avg[i].bbox.y += dets[j][i].bbox.y * 1./demo_frame;
  112. avg[i].bbox.w += dets[j][i].bbox.w * 1./demo_frame;
  113. avg[i].bbox.h += dets[j][i].bbox.h * 1./demo_frame;
  114. }
  115. //copy_cpu(classes, dets[0][i].prob, 1, avg[i].prob, 1);
  116. //avg[i].objectness = dets[0][i].objectness;
  117. }
  118. */
  119. if (nms > 0) do_nms_obj(dets, nboxes, l.classes, nms);
  120. printf( "33[2J");
  121. printf( "33[1;1H");
  122. printf( "nFPS:%.1fn",fps);
  123. printf( "Objects:nn");
  124. image display = buff[(buff_index+ 2) % 3];
  125. draw_detections(display, dets, nboxes, demo_thresh, demo_names, demo_alphabet, demo_classes);
  126. free_detections(dets, nboxes);
  127. demo_index = (demo_index + 1)%demo_frame;
  128. running = 0;
  129. return 0;
  130. }
  131. void *fetch_in_thread(void *ptr)
  132. {
  133. int status = fill_image_from_stream(cap, buff[buff_index]);
  134. letterbox_image_into(buff[buff_index], net->w, net->h, buff_letter[buff_index]);
  135. if(status == 0) demo_done = 1;
  136. return 0;
  137. }
  138. void *display_in_thread(void *ptr)
  139. {
  140. show_image_cv(buff[(buff_index + 1)% 3], "Demo", ipl);
  141. int c = cvWaitKey( 1);
  142. if (c != -1) c = c% 256;
  143. if (c == 27) {
  144. demo_done = 1;
  145. return 0;
  146. } else if (c == 82) {
  147. demo_thresh += .02;
  148. } else if (c == 84) {
  149. demo_thresh -= .02;
  150. if(demo_thresh <= .02) demo_thresh = .02;
  151. } else if (c == 83) {
  152. demo_hier += .02;
  153. } else if (c == 81) {
  154. demo_hier -= .02;
  155. if(demo_hier <= .0) demo_hier = .0;
  156. }
  157. return 0;
  158. }
  159. void *display_loop(void *ptr)
  160. {
  161. while( 1){
  162. display_in_thread( 0);
  163. }
  164. }
  165. void *detect_loop(void *ptr)
  166. {
  167. while( 1){
  168. detect_in_thread( 0);
  169. }
  170. }
  171. void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg_frames, float hier, int w, int h, int frames, int fullscreen)
  172. {
  173. //demo_frame = avg_frames;
  174. image **alphabet = load_alphabet();
  175. demo_names = names;
  176. demo_alphabet = alphabet;
  177. demo_classes = classes;
  178. demo_thresh = thresh;
  179. demo_hier = hier;
  180. printf( "Demon");
  181. net = load_network(cfgfile, weightfile, 0);
  182. set_batch_network(net, 1);
  183. pthread_t detect_thread;
  184. pthread_t fetch_thread;
  185. srand( 2222222);
  186. int i;
  187. demo_total = size_network(net);
  188. predictions = calloc(demo_frame, sizeof( float*));
  189. for (i = 0; i < demo_frame; ++i){
  190. predictions[i] = calloc(demo_total, sizeof( float));
  191. }
  192. avg = calloc(demo_total, sizeof( float));
  193. if(filename){
  194. printf( "video file: %sn", filename);
  195. cap = cvCaptureFromFile(filename);
  196. //*********rs20180415***********
  197. #ifdef SAVEVIDEO
  198. if(cap){
  199. int mfps = cvGetCaptureProperty(cap,CV_CAP_PROP_FPS); //local video file,needn't change
  200. mVideoWriter=cvCreateVideoWriter( "Output.avi",CV_FOURCC( 'M', 'J', 'P', 'G'),mfps,cvSize(cvGetCaptureProperty(cap,CV_CAP_PROP_FRAME_WIDTH),cvGetCaptureProperty(cap,CV_CAP_PROP_FRAME_HEIGHT)), 1);
  201. }
  202. #endif
  203. //*********rs20180415***********
  204. } else{
  205. cap = cvCaptureFromCAM(cam_index);
  206. //*********rs20180415***********
  207. #ifdef SAVEVIDEO
  208. if(cap){
  209. //int mfps = cvGetCaptureProperty(cap,CV_CAP_PROP_FPS); //webcam video file,need change.
  210. int mfps = 25; //the output video FPS,you can set here.
  211. mVideoWriter=cvCreateVideoWriter( "Output_webcam.avi",CV_FOURCC( 'M', 'J', 'P', 'G'),mfps,cvSize(cvGetCaptureProperty(cap,CV_CAP_PROP_FRAME_WIDTH),cvGetCaptureProperty(cap,CV_CAP_PROP_FRAME_HEIGHT)), 1);
  212. }
  213. #endif
  214. //*********rs20180415***********
  215. if(w){
  216. cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
  217. }
  218. if(h){
  219. cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
  220. }
  221. if(frames){
  222. cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
  223. }
  224. }
  225. if(!cap) error( "Couldn't connect to webcam.n");
  226. buff[ 0] = get_image_from_stream(cap);
  227. buff[ 1] = copy_image(buff[ 0]);
  228. buff[ 2] = copy_image(buff[ 0]);
  229. buff_letter[ 0] = letterbox_image(buff[ 0], net->w, net->h);
  230. buff_letter[ 1] = letterbox_image(buff[ 0], net->w, net->h);
  231. buff_letter[ 2] = letterbox_image(buff[ 0], net->w, net->h);
  232. ipl = cvCreateImage(cvSize(buff[ 0].w,buff[ 0].h), IPL_DEPTH_8U, buff[ 0].c);
  233. int count = 0;
  234. if(!prefix){
  235. cvNamedWindow( "Demo", CV_WINDOW_NORMAL);
  236. if(fullscreen){
  237. cvSetWindowProperty( "Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
  238. } else {
  239. cvMoveWindow( "Demo", 0, 0);
  240. cvResizeWindow( "Demo", 1352, 1013);
  241. }
  242. }
  243. demo_time = what_time_is_it_now();
  244. while(!demo_done){
  245. buff_index = (buff_index + 1) % 3;
  246. if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error( "Thread creation failed");
  247. if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error( "Thread creation failed");
  248. if(!prefix){
  249. //*********rs20180415***********
  250. #ifdef SAVEVIDEO
  251. save_video(buff[ 0],mVideoWriter);
  252. #endif
  253. //*********rs20180415***********
  254. fps = 1./(what_time_is_it_now() - demo_time);
  255. demo_time = what_time_is_it_now();
  256. display_in_thread( 0);
  257. } else{
  258. char name[ 256];
  259. sprintf(name, "%s_%08d", prefix, count);
  260. //*********rs20180415***********
  261. #ifdef SAVEVIDEO
  262. save_video(buff[ 0],mVideoWriter);
  263. #else
  264. save_image(buff[(buff_index + 1)% 3], name);
  265. #endif
  266. //*********rs20180415***********
  267. }
  268. pthread_join(fetch_thread, 0);
  269. pthread_join(detect_thread, 0);
  270. ++count;
  271. }
  272. }
  273. /*
  274. void demo_compare(char *cfg1, char *weight1, char *cfg2, char *weight2, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg_frames, float hier, int w, int h, int frames, int fullscreen)
  275. {
  276. demo_frame = avg_frames;
  277. predictions = calloc(demo_frame, sizeof(float*));
  278. image **alphabet = load_alphabet();
  279. demo_names = names;
  280. demo_alphabet = alphabet;
  281. demo_classes = classes;
  282. demo_thresh = thresh;
  283. demo_hier = hier;
  284. printf("Demon");
  285. net = load_network(cfg1, weight1, 0);
  286. set_batch_network(net, 1);
  287. pthread_t detect_thread;
  288. pthread_t fetch_thread;
  289. srand(2222222);
  290. if(filename){
  291. printf("video file: %sn", filename);
  292. cap = cvCaptureFromFile(filename);
  293. }else{
  294. cap = cvCaptureFromCAM(cam_index);
  295. if(w){
  296. cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
  297. }
  298. if(h){
  299. cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
  300. }
  301. if(frames){
  302. cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
  303. }
  304. }
  305. if(!cap) error("Couldn't connect to webcam.n");
  306. layer l = net->layers[net->n-1];
  307. demo_detections = l.n*l.w*l.h;
  308. int j;
  309. avg = (float *) calloc(l.outputs, sizeof(float));
  310. for(j = 0; j < demo_frame; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
  311. boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
  312. probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
  313. for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes+1, sizeof(float));
  314. buff[0] = get_image_from_stream(cap);
  315. buff[1] = copy_image(buff[0]);
  316. buff[2] = copy_image(buff[0]);
  317. buff_letter[0] = letterbox_image(buff[0], net->w, net->h);
  318. buff_letter[1] = letterbox_image(buff[0], net->w, net->h);
  319. buff_letter[2] = letterbox_image(buff[0], net->w, net->h);
  320. ipl = cvCreateImage(cvSize(buff[0].w,buff[0].h), IPL_DEPTH_8U, buff[0].c);
  321. int count = 0;
  322. if(!prefix){
  323. cvNamedWindow("Demo", CV_WINDOW_NORMAL);
  324. if(fullscreen){
  325. cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
  326. } else {
  327. cvMoveWindow("Demo", 0, 0);
  328. cvResizeWindow("Demo", 1352, 1013);
  329. }
  330. }
  331. demo_time = what_time_is_it_now();
  332. while(!demo_done){
  333. buff_index = (buff_index + 1) %3;
  334. if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
  335. if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");
  336. if(!prefix){
  337. fps = 1./(what_time_is_it_now() - demo_time);
  338. demo_time = what_time_is_it_now();
  339. display_in_thread(0);
  340. }else{
  341. char name[256];
  342. sprintf(name, "%s_%08d", prefix, count);
  343. save_image(buff[(buff_index + 1)%3], name);
  344. }
  345. pthread_join(fetch_thread, 0);
  346. pthread_join(detect_thread, 0);
  347. ++count;
  348. }
  349. }
  350. */
  351. #else
  352. void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg, float hier, int w, int h, int frames, int fullscreen)
  353. {
  354. fprintf( stderr, "Demo needs OpenCV for webcam images.n");
  355. }
  356. #endif
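After demo.c and image.c have been modified, the project needs to be rebuilt. A reminder of standard darknet practice (not spelled out in the original post): set OPENCV=1 at the top of the Makefile in the darknet root, then run:

          make clean
          make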

I ran into quite a few problems while running this. For example, during real-time detection through the webcam I hit the issue below. The error is not fatal: detection still runs, but the detection video cannot be saved. The error messages were:

               HIGHGUI ERROR: V4L/V4L2:VIDIOC_S_CROP

               HIGHGUI ERROR: V4L/V4L2:getting property #5 is not supported

             GLib-GIOMessage: Using the 'memory' GSettings backend.  Your settings will not be saved or shared with other applications.              

Solutions:

(1) The first problem

 

           HIGHGUI ERROR: V4L/V4L2:VIDIOC_S_CROP

           HIGHGUI ERROR: V4L/V4L2:getting property #5 is not supported

I tested this myself; it is a code issue, and the code posted above has already been fixed. The problem was getting the frame rate from the webcam with //int mfps = cvGetCaptureProperty(cap,CV_CAP_PROP_FPS); — that does not work for a webcam (it is fine when testing a local video file), so the frame rate is set to a constant here instead: int mfps = 25;. See the corresponding comments in demo.c.
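A small alternative sketch (my suggestion, not part of the original fix): query the property first and only fall back to a constant when the capture driver does not report a usable value.

            /* assumed variant of the webcam branch in demo(): fall back to 25 FPS
             * when the driver reports no frame rate (common for V4L2 webcams) */
            int mfps = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FPS);
            if (mfps <= 0) mfps = 25;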

(2) The second problem

       GLib-GIOMessage: Using the 'memory' GSettings backend.  Your settings will not be saved or shared with other applications.       

A path needs to be added; proceed as follows:

Run this in an Ubuntu terminal:

                                                sudo gedit /etc/profile

Then add the following line to the opened file (sudo privileges required):

                                                export GIO_EXTRA_MODULES=/usr/lib/x86_64-linux-gnu/gio/modules/ 

Finally, make the change above take effect immediately:

                                                source /etc/profile

Addendum: the video this program saves does indeed have a slight problem; the final video flickers. Temporary workaround: save each detected frame as an individual image, then assemble those images into a video separately (the result plays normally). This takes two steps; I ran into problems trying to combine them into one, so suggestions are welcome.

Steps: (1) Manually create a folder named "picture" under the darknet directory to hold the individual detection result images.

(2) Assemble the images into a video. This is easy with OpenCV in C++ or Python (plenty of examples online); a C sketch follows below. You only need to change two files, demo.c and image.c; download link: https://pan.baidu.com/s/14s8vaF4Wac0hJY7S7hbz5A
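A minimal sketch of the image-to-video step, using the same OpenCV C API as the rest of this post. This is my own illustration, not the code from the download link; the picture/frame_%08d.jpg naming pattern and the 25 FPS output rate are assumptions you should adjust to match how your frames were actually saved.

/* compose_video.c - stitch saved detection frames back into one video.
 * Build (OpenCV 2.x C API), e.g.:
 *   gcc compose_video.c -o compose_video `pkg-config --cflags --libs opencv`
 */
#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    int fps = 25;                     /* output frame rate, adjust as needed */
    char name[256];
    CvVideoWriter *writer = NULL;
    int i;

    for (i = 0; ; ++i) {
        /* hypothetical file name pattern for the frames saved in "picture" */
        sprintf(name, "picture/frame_%08d.jpg", i);
        IplImage *frame = cvLoadImage(name, CV_LOAD_IMAGE_COLOR);
        if (!frame) break;            /* stop at the first missing frame */
        if (!writer) {
            /* create the writer lazily, once the frame size is known */
            writer = cvCreateVideoWriter("Merged.avi", CV_FOURCC('M','J','P','G'),
                                         fps, cvGetSize(frame), 1);
        }
        cvWriteFrame(writer, frame);
        cvReleaseImage(&frame);
    }
    if (writer) cvReleaseVideoWriter(&writer);
    return 0;
}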

If you have questions, contact me via the WeChat listed on my profile page.

Addendum (2018-10-21): the newly uploaded code works. The old code also works, but the saved video flickers slightly; a fix seems to have appeared in the comments, so feel free to try it. (I have started a job and no longer have this experimental environment.)

 

Reference: https://blog.csdn.net/hrsstudy/article/details/60876451
