OpenCV学习笔记非打印版本

直方图

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

/**
 * @function main
 * Loads "001.jpg", splits it into its three colour planes, computes a
 * 256-bin intensity histogram for each plane, normalizes them to the
 * canvas height and draws the three histograms as polylines.
 */
int main(int argc, char** argv)
{
    Mat src, dst;

    // Load the source image.
    src = imread("001.jpg");
    if (!src.data)
    {
        return -1;  // image missing or unreadable
    }

    // Split into 3 single-channel planes.
    // NOTE(review): imread returns channels in BGR order, so planes[0]
    // is blue and planes[2] is red; the r_/g_/b_ variable names below
    // follow the original sample and do not match the channel order.
    vector<Mat> rgb_planes;
    split(src, rgb_planes);

    // Number of bins: 256 so every 8-bit intensity gets its own bin
    // (the original used 255, which merged two intensities into one bin).
    int histSize = 256;

    // Value range; the upper bound is exclusive, so {0,256} covers 0..255.
    float range[] = { 0, 256 };
    const float* histRange = { range };

    bool uniform = true; bool accumulate = false;

    Mat r_hist, g_hist, b_hist;

    // Compute one 1-D histogram per plane.
    calcHist(&rgb_planes[0], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&rgb_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&rgb_planes[2], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate);

    // Create the histogram canvas.
    int hist_w = 400; int hist_h = 400;
    int bin_w = cvRound((double)hist_w / histSize);

    // Mat takes (rows, cols): rows = height, cols = width.
    Mat histImage(hist_h, hist_w, CV_8UC3, Scalar(0, 0, 0));

    // Normalize each histogram into [0, histImage.rows] so it fits the canvas.
    normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
    normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
    normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());

    // Draw each histogram as a polyline, one line segment per bin.
    // y grows downwards, hence "hist_h - value".
    for (int i = 1; i < histSize; i++)
    {
        line(histImage,
             Point(bin_w * (i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))),
             Point(bin_w * i,       hist_h - cvRound(r_hist.at<float>(i))),
             Scalar(0, 0, 255), 2, 8, 0);
        line(histImage,
             Point(bin_w * (i - 1), hist_h - cvRound(g_hist.at<float>(i - 1))),
             Point(bin_w * i,       hist_h - cvRound(g_hist.at<float>(i))),
             Scalar(0, 255, 0), 2, 8, 0);
        line(histImage,
             Point(bin_w * (i - 1), hist_h - cvRound(b_hist.at<float>(i - 1))),
             Point(bin_w * i,       hist_h - cvRound(b_hist.at<float>(i))),
             Scalar(255, 0, 0), 2, 8, 0);
    }

    // Display the result.
    namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE);
    imshow("calcHist Demo", histImage);

    waitKey(0);
    return 0;
}

图像金字塔

#include

#include

#include

#include

#include

#include

using namespace cv;

using namespace std;

Mat src,dst,tmp;

char*window_name="Pyramids Demo";

int main()

{

cout<<"\n Zoom In-Out demo\n"<

cout<<"--------------------\n";

cout<<"*[u]->Zoom in\n"<

cout<<"*[d]->Zoom out\n"<

cout<<"*[ESC]->Close program\n\n"<

src=imread("001.jpg");

if(!src.data)

{

cout<<"No data!--Exiting the program\n"<

return-1;

}

tmp=src;

dst=tmp;

namedWindow(window_name,CV_WINDOW_AUTOSIZE);

imshow(window_name,dst);

while(true)

{

int c;

c=waitKey(10);

if((char)c==27)

{

break;

}

if((char)c=='u')

{

pyrUp(tmp,dst,Size(tmp.cols*2,tmp.rows*2));

printf("**Zoom In:Image x2\n");

}

else if((char)c=='d')

{

pyrDown(tmp,dst,Size(tmp.cols/2,tmp.rows/2));

cout<<"**Zoom Out:Image/2\n"<

}

imshow(window_name,dst);

tmp=dst;

}

return0;

}

基于混合高斯背景建模的目标跟踪

// Object tracking via Mixture-of-Gaussians background modelling
// (Stauffer & Grimson style, OpenCV 1.x C API).
#include "cv.h"
#include "highgui.h"

int main()
{
    CvCapture* capture = cvCreateFileCapture("001.mp4");
    IplImage *mframe, *current, *frg, *test;
    int *fg, *bg_bw, *rank_ind;
    double *w, *mean, *sd, *u_diff, *rank;
    int C, M, sd_init, i, j, k, m, rank_ind_temp = 0, min_index = 0, x = 0, y = 0, counter_frame = 0;
    double D, alph, thresh, p, temp;
    // BUG FIX: the original swapped double rank values through an int
    // temporary ("rand_temp"), truncating them; use a double.
    double rank_temp = 0;
    // BUG FIX: the RNG state was used uninitialized; seed it explicitly.
    CvRNG state = cvRNG(0xffffffff);
    int match, height, width;

    mframe = cvQueryFrame(capture);
    if (!mframe)
        return -1;  // could not open or read the video

    frg = cvCreateImage(cvSize(mframe->width, mframe->height), IPL_DEPTH_8U, 1);
    current = cvCreateImage(cvSize(mframe->width, mframe->height), IPL_DEPTH_8U, 1);
    test = cvCreateImage(cvSize(mframe->width, mframe->height), IPL_DEPTH_8U, 1);

    C = 4;          // number of gaussian components (typically 3-5)
    M = 4;          // number of background components
    sd_init = 6;    // initial standard deviation (var = 36 in the paper)
    alph = 0.01;    // learning rate, between 0 and 1 (0.01 in the paper)
    D = 2.5;        // positive deviation threshold
    thresh = 0.25;  // foreground threshold (0.25 or 0.75 in the paper)
    p = alph / (1 / C);  // initial p variable (used to update mean and sd)

    // NOTE(review): the original uses widthStep (the row stride in bytes)
    // as the logical width so that the flat i*width+j indexing below lines
    // up with imageData's padded rows; kept as-is.
    height = current->height; width = current->widthStep;

    fg = (int*)malloc(sizeof(int) * width * height);               // foreground array
    bg_bw = (int*)malloc(sizeof(int) * width * height);            // background array
    rank = (double*)malloc(sizeof(double) * 1 * C);                // component ranks (w/sd)
    w = (double*)malloc(sizeof(double) * width * height * C);      // weights
    mean = (double*)malloc(sizeof(double) * width * height * C);   // pixel means
    sd = (double*)malloc(sizeof(double) * width * height * C);     // pixel standard deviations
    u_diff = (double*)malloc(sizeof(double) * width * height * C); // |pixel - mean| per component

    // Initialize every component of every pixel: random mean,
    // equal weights, initial standard deviation.
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)
        {
            for (k = 0; k < C; k++)
            {
                mean[i * width * C + j * C + k] = cvRandReal(&state) * 255;
                w[i * width * C + j * C + k] = (double)1 / C;
                sd[i * width * C + j * C + k] = sd_init;
            }
        }
    }

    // BUG FIX: the windows were created after the processing loop in the
    // original; create them before so cvShowImage has somewhere to draw.
    cvNamedWindow("back", 0);
    cvNamedWindow("fore", 0);

    while (1) {
        rank_ind = (int*)malloc(sizeof(int) * C);
        cvCvtColor(mframe, current, CV_BGR2GRAY);

        // Distance of each pixel value from each component mean.
        // (fabs, not abs: these are doubles.)
        for (i = 0; i < height; i++)
        {
            for (j = 0; j < width; j++)
            {
                for (m = 0; m < C; m++)
                {
                    u_diff[i * width * C + j * C + m] =
                        fabs((uchar)current->imageData[i * width + j] - mean[i * width * C + j * C + m]);
                }
            }
        }

        // Update the gaussian components of each pixel.
        for (i = 0; i < height; i++)
        {
            for (j = 0; j < width; j++)
            {
                match = 0;
                temp = 0;
                for (k = 0; k < C; k++)
                {
                    if (fabs(u_diff[i * width * C + j * C + k]) <= D * sd[i * width * C + j * C + k])
                    {   // pixel matches this component
                        match = 1;

                        // Update weight, p, mean and sd of the matched component.
                        w[i * width * C + j * C + k] = (1 - alph) * w[i * width * C + j * C + k] + alph;
                        p = alph / w[i * width * C + j * C + k];
                        mean[i * width * C + j * C + k] = (1 - p) * mean[i * width * C + j * C + k] +
                            p * (uchar)current->imageData[i * width + j];
                        sd[i * width * C + j * C + k] =
                            sqrt((1 - p) * (sd[i * width * C + j * C + k] * sd[i * width * C + j * C + k]) +
                                 p * (pow((uchar)current->imageData[i * width + j] - mean[i * width * C + j * C + k], 2)));
                    }
                    else {
                        // No match: the weight decays slightly.
                        w[i * width * C + j * C + k] = (1 - alph) * w[i * width * C + j * C + k];
                    }
                    temp += w[i * width * C + j * C + k];
                }

                // Renormalize the weights so they sum to 1.
                for (k = 0; k < C; k++)
                {
                    w[i * width * C + j * C + k] = w[i * width * C + j * C + k] / temp;
                }

                // Build the weighted background estimate and find the
                // smallest-weight component (replacement candidate).
                temp = w[i * width * C + j * C];
                bg_bw[i * width + j] = 0;
                for (k = 0; k < C; k++)
                {
                    bg_bw[i * width + j] = bg_bw[i * width + j] +
                        mean[i * width * C + j * C + k] * w[i * width * C + j * C + k];
                    if (w[i * width * C + j * C + k] <= temp)
                    {
                        min_index = k;
                        temp = w[i * width * C + j * C + k];
                    }
                    rank_ind[k] = k;
                }
                test->imageData[i * width + j] = (uchar)bg_bw[i * width + j];

                // If no component matched, replace the weakest one with a
                // fresh component centred on the current pixel.
                if (match == 0)
                {
                    mean[i * width * C + j * C + min_index] = (uchar)current->imageData[i * width + j];
                    sd[i * width * C + j * C + min_index] = sd_init;
                }

                // Rank components by weight/sd.
                for (k = 0; k < C; k++)
                {
                    rank[k] = w[i * width * C + j * C + k] / sd[i * width * C + j * C + k];
                }

                // Sort ranks descending, carrying the component indices along.
                for (k = 1; k < C; k++)
                {
                    for (m = 0; m < k; m++)
                    {
                        if (rank[k] > rank[m])
                        {
                            // swap rank values
                            rank_temp = rank[m];
                            rank[m] = rank[k];
                            rank[k] = rank_temp;
                            // swap index values
                            rank_ind_temp = rank_ind[m];
                            rank_ind[m] = rank_ind[k];
                            rank_ind[k] = rank_ind_temp;
                        }
                    }
                }

                // Classify the pixel: scan the top-M ranked components.
                match = 0; k = 0;
                while ((match == 0) && (k < M)) {
                    if (w[i * width * C + j * C + rank_ind[k]] >= thresh)
                    {
                        if (fabs(u_diff[i * width * C + j * C + rank_ind[k]]) <= D * sd[i * width * C + j * C + rank_ind[k]]) {
                            frg->imageData[i * width + j] = 0;  // background
                            match = 1;
                        }
                        else
                            frg->imageData[i * width + j] = (uchar)current->imageData[i * width + j];  // foreground
                    }
                    k = k + 1;
                }
            }
        }

        cvShowImage("fore", frg);
        cvShowImage("back", test);

        char s = cvWaitKey(33);
        free(rank_ind);  // freed on every exit path (the original leaked it on ESC)
        if (s == 27) break;

        // BUG FIX: check for end of video; cvCvtColor on NULL would crash.
        mframe = cvQueryFrame(capture);
        if (!mframe) break;
    }

    // BUG FIX: bg_bw was missing from the original free list (leak).
    free(fg); free(w); free(mean); free(sd); free(u_diff); free(rank); free(bg_bw);

    cvReleaseCapture(&capture);
    cvDestroyWindow("fore");
    cvDestroyWindow("back");
    return 0;
}

Camshift

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

Mat image;

// State shared between main() and the mouse callback.
// (The original fused two declarations on one line, swallowing
// "bool selectObject=false;" into a comment.)
bool backprojMode = false;  // true => display the back-projection instead of the frame
bool selectObject = false;  // true while the user is dragging the selection rectangle
int trackObject = 0;        // 0: idle, -1: (re)initialize tracking, 1: tracking
bool showHist = true;       // whether to show the histogram window
Point origin;               // mouse position at the first click (selection anchor)
Rect selection;             // rectangle selected with the mouse
int vmin = 10, vmax = 256, smin = 30;  // HSV threshold values (bound to trackbars)

/**
 * Mouse callback: lets the user drag a rectangle over `image` to select
 * the object to track. Sets `selection`, `selectObject` and `trackObject`.
 * (The original's if-body braces had been swallowed into comments.)
 */
void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)  // left button held: keep growing the selection rect
    {
        selection.x = MIN(x, origin.x);  // top-left corner
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);   // rect width
        selection.height = std::abs(y - origin.y);  // rect height
        selection &= Rect(0, 0, image.cols, image.rows);  // clip to the image
    }

    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);  // start with an empty rect at the click
        selectObject = true;
        break;
    case CV_EVENT_LBUTTONUP:
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            trackObject = -1;  // signal the main loop to (re)initialize tracking
        break;
    }
}

// Print the demo description and the hot-key reference to stdout.
void help()
{
    static const char* const kIntro =
        "\nThis is a demo that shows mean-shift based tracking\n"
        "You select a color objects such as your face and it tracks it.\n"
        "This reads from video camera(0by default,or the camera number the user enters\n"
        "Usage:\n"
        "./camshiftdemo[camera number]\n";

    static const char* const kHotKeys =
        "\n\nHot keys:\n"
        "\tESC-quit the program\n"
        "\tc-stop the tracking\n"
        "\tb-switch to/from backprojection view\n"
        "\th-show/hide object histogram\n"
        "\tp-pause video\n"
        "To initialize tracking,select the object with mouse\n";

    std::cout << kIntro;
    std::cout << kHotKeys;
}

// CommandLineParser specification: positional argument "1" (the first
// positional argument) is the camera number, defaulting to "0".
const char*keys=

{

"{1||0|camera number}"

};

int main(int argc,const char**argv)

{

help();

VideoCapture cap;//定义一个摄像头捕捉的类对象

Rect trackWindow;

RotatedRect trackBox;//定义一个旋转的矩阵类对象

int hsize=16;

float hranges[]={0,180};//hranges在后面的计算直方图函数中要用到const float*phranges=hranges;

CommandLineParser parser(argc,argv,keys);//命令解析器函数

int camNum=parser.get("1");

cap.open(camNum);//直接调用成员函数打开摄像头

if(!cap.isOpened())

{

help();

cout<<"***Could not initialize capturing...***\n";

cout<<"Current parameter's value:\n";

parser.printParams();

return-1;

}

namedWindow("Histogram",0);

namedWindow("CamShift Demo",0);

setMouseCallback("CamShift Demo",onMouse,0);//消息响应机制

createTrackbar("Vmin","CamShift Demo",&vmin,256,0);//createTrackbar函数的功能是在对应的窗口创建滑动条,滑动条Vmin,vmin表示滑动条的值,最大为256

createTrackbar("Vmax","CamShift Demo",&vmax,256,0);//最后一个参数为0代表没有调用滑动拖动的响应函数createTrackbar("Smin","CamShift Demo",&smin,256,0);//vmin,vmax,smin初始值分别为10,256,30 Mat frame,hsv,hue,mask,hist,histimg=Mat::zeros(200,320,CV_8UC3),backproj;

bool paused=false;

for(;;)

{

if(!paused)//没有暂停

{

cap>>frame;//从摄像头抓取一帧图像并输出到frame中

if(frame.empty())

break;

}

frame.copyTo(image);

if(!paused)//没有按暂停键

{

cvtColor(image,hsv,CV_BGR2HSV);//将rgb摄像头帧转化成hsv空间的

if(trackObject)//trackObject初始化为0,或者按完键盘的'c'键后也为0,当鼠标单击松开后为-1

{

int_vmin=vmin,_vmax=vmax;

//inRange函数的功能是检查输入数组每个元素大小是否在2个给定数值之间,可以有多通道,mask保存0通道的最小值,也就是h分量

//这里利用了hsv的3个通道,比较h,0~180,s,smin~256,v,min(vmin,vmax),max(vmin,vmax)。如果3个通道都在对应的范围内,则

//mask对应的那个点的值全为1(0xff),否则为0(0x00).

inRange(hsv,Scalar(0,smin,MIN(_vmin,_vmax)),

Scalar(180,256,MAX(_vmin,_vmax)),mask);

int ch[]={0,0};

hue.create(hsv.size(),hsv.depth());//hue初始化为与hsv大小深度一样的矩阵,色调的度量是用角度表示的,红绿蓝之间相差120度,反色相差180度

mixChannels(&hsv,1,&hue,1,ch,1);//将hsv第一个通道(也就是色调)的数复制到hue中,0索引数组

if(trackObject<0)//鼠标选择区域松开后,该函数内部又将其赋值1

{

//此处的构造函数roi用的是Mat hue的矩阵头,且roi的数据指针指向hue,即共用相同的数据,select 为其感兴趣的区域

相关主题
相关文档
最新文档