Opencv two frame difference method detects moving target and extracts contour

  • 2020-08-22 22:17:33
  • OfStack

Opencv learned 2 frame difference method for moving target detection and contour extraction, for your reference, the specific content is as follows

The code was copied from the Internet and studied, with many comments added. It felt like taking notes while reading a book — a satisfying way to learn. Let's get started!


#include "highgui.h"
#include "cv.h"
#include "stdio.h"
#include <math.h>
#include <stdlib.h>  /* malloc (was previously implicitly declared) */
#include <string.h>
#include <time.h>

/* ---- Tunable parameters -------------------------------------------------- */
const double MHI_DURATION=0.1;    /* max duration of motion tracking, seconds */
const double MAX_TIME_DELTA=0.5;  /* max time delta, seconds (bug fix: the
                                     original line was missing its ';') */
const double MIN_TIME_DELTA=0.05; /* min time delta, seconds */
const int N=3;                    /* number of slots in the frame ring buffer */
const int CONTOUR_MAX_AERA=16;    /* minimum bounding-box area threshold */

/* Ring buffer of grayscale frames used for frame differencing */
IplImage **buf=0;
int last=0;                       /* index of the newest frame in buf */

/* Motion history image (IPL_DEPTH_32F); rebuilt when the frame size changes */
IplImage* mhi=0;

/* NOTE(review): because '*' binds to the declarator, cur_comp is a pointer
   but mincomp is a struct value; both are unused in this file. */
CvConnectedComp* cur_comp,mincomp;
/* CvConnectedComp layout, for reference:
     double area;     area of the region
     CvScalar value;  average color of the region
     CvRect rect;     bounding rectangle of the region
     CvSeq *contour;  pointer to another sequence */

/* Global memory storage (unused; update() allocates its own per call) */
CvMemStorage* storage;

/* 2-D integer points, origin at (0,0) (unused) */
CvPoint pt[4];

/* Current frame index (unused) */
int nCurFrameIndex=0;

/* Defines the function used to update the motion history image */
/*img- Input video frame; dst- Test results */
/* Update the motion history image and draw red bounding boxes around moving
   regions.
   img            - input video frame (BGR); boxes are drawn onto it
   dst            - single-channel scratch/result image (same size as img)
   diff_threshold - unused; the binarization threshold is hard-coded to 50 */
void update(IplImage *img,IplImage *dst,int diff_threshold)
{
 /* Current time in seconds.
    Bug fix: clock() ticks in CLOCKS_PER_SEC units (1000 or 1000000 on common
    platforms), so the original clock()/100 was not seconds and broke the
    seconds-based MHI_DURATION aging. */
 double timestamp=(double)clock()/CLOCKS_PER_SEC;
 /* Size of the input frame */
 CvSize size=cvSize(img->width,img->height);
 int i,idx1,idx2;
 /* Holds the absolute difference between the current and previous frame */
 IplImage* nimg;
 /* Half-size scratch image for pyramid-based noise filtering;
    (x & -2) rounds each dimension down to an even number */
 IplImage* pyr=cvCreateImage(cvSize((size.width&-2)/2,(size.height&-2)/2),8,1);
 CvMemStorage* stor;  /* per-call storage for the contour sequence */
 CvSeq* seq;          /* first outer contour found by cvFindContours */

 /* (Re)initialize buffers on first call or when the frame size changes
    (e.g. a new video was opened). */
 if(!mhi||mhi->width!=size.width||mhi->height!=size.height)
 {
  if(buf==0)
  {
   /* Allocate the ring buffer of N image pointers and zero it */
   buf=(IplImage**)malloc(N*sizeof(buf[0]));
   memset(buf,0,N*sizeof(buf[0]));
  }
  /* (Re)create each ring-buffer frame at the new size */
  for(i=0;i<N;i++)
  {
   cvReleaseImage(&buf[i]);
   buf[i]=cvCreateImage(size,IPL_DEPTH_8U,1);
   cvZero(buf[i]);
  }
  /* Recreate the motion history image */
  cvReleaseImage(&mhi);
  mhi=cvCreateImage(size,IPL_DEPTH_32F,1);
  cvZero(mhi);
 }

 /* Convert the current frame to grayscale into the newest buffer slot */
 cvCvtColor(img,buf[last],CV_BGR2GRAY);
 /* Rotate the ring buffer: idx1 = previous frame, idx2 = current frame */
 idx1=last;
 idx2=(last+1)%N;
 last=idx2;
 /* Frame difference: per-pixel absolute difference of the two frames */
 nimg=buf[idx2];
 cvAbsDiff(buf[idx1],buf[idx2],nimg);
 /* Binarize the difference image */
 cvThreshold(nimg,nimg,50,255,CV_THRESH_BINARY);
 /* Fold the new silhouette into the motion history image, aging out motion
    older than MHI_DURATION seconds, then copy it into dst */
 cvUpdateMotionHistory(nimg,mhi,timestamp,MHI_DURATION);
 cvConvert(mhi,dst);
 /* Noise removal:
    - median filter kills isolated speckles
    - pyrDown (Gaussian pyramid, 1/4 area) + dilate + pyrUp closes small
      holes in the detected blobs */
 cvSmooth(dst,dst,CV_MEDIAN,3,0,0,0);
 cvPyrDown(dst,pyr,CV_GAUSSIAN_5x5);
 cvDilate(pyr,pyr,0,1);
 cvPyrUp(pyr,dst,CV_GAUSSIAN_5x5);

 /* Find the external contours of the remaining blobs */
 stor=cvCreateMemStorage(0);
 seq=cvCreateSeq(CV_SEQ_ELTYPE_POINT, /* sequence of points */
  sizeof(CvSeq),                      /* header size: >= sizeof(CvSeq) */
  sizeof(CvPoint),                    /* element size, must match type */
  stor);                              /* backing memory storage */

 cvFindContours(dst,     /* binary source image (modified by the call) */
  stor,                  /* storage for the contours */
  &seq,                  /* receives the address of the first outer contour */
  sizeof(CvContour),
  CV_RETR_EXTERNAL,      /* outermost contours only */
  CV_CHAIN_APPROX_NONE,  /* keep every contour point */
  cvPoint(0,0));

 /* Walk the contour list and draw an axis-aligned bounding box around each
    sufficiently large blob */
 for(;seq;seq=seq->h_next)
 {
  /* Bug fix: the original read ((CvContour*)cont)->rect, but 'cont' is
     undefined — the loop variable is 'seq'. */
  CvRect r=((CvContour*)seq)->rect;
  /* Skip small boxes. 2560 px^2 subsumes the original redundant
     CONTOUR_MAX_AERA (16) check, so only one comparison is needed. */
  if(r.height*r.width>2560)
  {
   cvRectangle(img,                        /* draw on the input frame */
    cvPoint(r.x,r.y),                      /* top-left corner */
    cvPoint(r.x + r.width, r.y + r.height),/* bottom-right corner */
    CV_RGB(255,0,0),                       /* red */
    1,                                     /* line thickness */
    CV_AA,                                 /* anti-aliased */
    0);                                    /* no fractional bits */
  }
 }

 /* Release per-call resources */
 cvReleaseMemStorage(&stor);
 cvReleaseImage(&pyr);
}

/* Process the video: the main function */
/* Open the video file, run motion detection on every frame, and display the
   result until the stream ends or the user presses a key. */
int main(int argc,char**argv)
{
 IplImage *motion=0;    /* single-channel result buffer, lazily allocated */
 CvCapture *capture=0;

 /* Open the video file (path is hard-coded) */
 capture=cvCaptureFromFile("D:\\ video \\01.mp4");
 if(capture)
 {
  cvNamedWindow("Motion",1);
  for(;;)
  {
   IplImage *image;
   /* Grab the next frame; stop at end of stream */
   if(!cvGrabFrame(capture))
    break;
   /* Decode the frame grabbed by cvGrabFrame */
   image=cvRetrieveFrame(capture);
   if(image)
   {
    /* First decoded frame: allocate the result image to match the input */
    if(!motion)
    {
     motion=cvCreateImage(cvSize(image->width,image->height),8,1);
     cvZero(motion);
     /* Keep the same row order (top-down/bottom-up) as the source frames */
     motion->origin=image->origin;
    }
    /* Bug fix: process and display only when a frame was actually decoded;
       the original called update() even when image was NULL, which would
       dereference a null pointer. */
    update(image,motion,10);
    cvShowImage("Motion",image);
   }
   /* Quit if the user presses any key within 10 ms */
   if(cvWaitKey(10)>=0)
    break;
  }
  /* Stream finished or user stopped: release everything */
  cvReleaseCapture(&capture);
  cvReleaseImage(&motion);  /* bug fix: the result image was leaked */
  cvDestroyWindow("Motion");
 }
 return 0;
}

In tests, the program was able to successfully detect and circle moving vehicles and pedestrians in red boxes.

Areas for improvement include:

Video processing is slow: it runs at only about half the video's normal playback speed.

For the detection of pedestrians, the red box drawn is not stable, which does not frame the whole pedestrian, but often frames several different parts of a person.

When two objects overlap slightly, the overlapping object will be circled as one object.


Related articles: