基于opencv的手掌检测与移动的跟踪等源代码
模式识别开发之项目---基于opencv的手势识别

模式识别开发之项目---基于opencv的手势识别。我使用OpenCV2.4.4的windows版本+Qt4.8.3+VS2010的编译器做了一个手势识别的小程序。
本程序主要使用到了OpenCV的特征训练库和最基本的图像处理知识,包括肤色检测等等。
废话不多,先看⼀下基本的界⾯设计,以及主要功能:相信对于Qt有⼀些了解的⼈都不会对这个界⾯的设计感到陌⽣吧!(该死,该死!)我们向下⾛:紧接着是Qt导⼊OPenCV2.4.4的库⽂件:(先看⼀下Qt的⼯程⽂件吧)[cpp]1. #-------------------------------------------------2. #3. # Project created by QtCreator 2013-05-25T11:16:114. #5. #-------------------------------------------------6.7. QT += core gui8.9. CONFIG += warn_off10.11. greaterThan(QT_MAJOR_VERSION, 4): QT += widgets12.13. TARGET = HandGesture14. TEMPLATE = app15.16. INCLUDEPATH += E:/MyQtCreator/MyOpenCV/opencv/build/include17.18. SOURCES += main.cpp\19. handgesturedialog.cpp \20. SRC/GestrueInfo.cpp \21. SRC/AIGesture.cpp22.23. HEADERS += handgesturedialog.h \24. SRC/GestureStruct.h \25. SRC/GestrueInfo.h \26. SRC/AIGesture.h27.28. FORMS += handgesturedialog.ui29.30. #Load OpenCV runtime libs31. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core24432. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core244d33.34. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1035. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1036.37. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d24438. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d244d39.40. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1041. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1042.43. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engine44. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engined45.46. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1047. 
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1048.49. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui24450. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui244d51.52. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1053. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1054.55. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect24456. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect244d57.58. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1059. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1060.61. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video24462. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video244d63.64. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1065. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1066.67. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d24468. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d244d69.70. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1071. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1072.73. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib24474. 
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib244d75.76. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1077. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1078.79. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc24480. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc244d81.82. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1083. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1084.85.86. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy24487. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy244d88.89. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1090. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1091.92. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml24493. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml244d94.95. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1096. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc1097.98. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo24499. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo244d100.101. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10102. 
DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10103.104. win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244105. else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244d106.107. INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10108. DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10当做好以上的基本配置之后,我们进⾏⼿势识别的开发:第⼀:要采集到原始的图⽚采集好原始图⽚后进⾏修正,包括尺⼨⼤⼩,那时我还使⽤到了matlab这个强⼤的⼯具,紧接着进⾏图像的样本特征提取,到⽹上把,CSDN中有⼤量的关于对图像特征训练库的识别与训练,按照他们⼀步⼀步的操作模式不会有问题的饿下⾯是要通过摄像头进⾏图像的采集,直接贴代码:[cpp]1. void HandGestureDialog::on_pushButton_OpenCamera_clicked()2. {3. cam = cvCreateCameraCapture(0);4. timer->start(time_intervals);5. frame = cvQueryFrame(cam);6.7. ui->pushButton_OpenCamera->setDisabled (true);8. ui->pushButton_CloseCamera->setEnabled (true);9. ui->pushButton_ShowPause->setEnabled (true);10. ui->pushButton_SnapImage->setEnabled (true);11. afterSkin = cvCreateImage (cvSize(frame->width,frame->height),IPL_DEPTH_8U,1);12. }[cpp]1. void HandGestureDialog::readFarme()2. {3. frame = cvQueryFrame(cam);4. QImage image((const uchar*)frame->imageData,5. frame->width,6. frame->height,7. QImage::Format_RGB888);8. image = image.rgbSwapped();9. image = image.scaled(320,240);10. ui->label_CameraShow->setPixmap(QPixmap::fromImage(image));11. gesture.SkinDetect (frame,afterSkin);12.13. /*next to opencv*/14.15. if(status_switch == Recongnise)16. {17. // Flips the frame into mirror image18. cvFlip(frame,frame,1);19.20. // Call the function to detect and draw the hand positions21. StartRecongizeHand(frame);22. }23. }查看⼀下样例图⽚:开始训练的核⼼代码:[cpp]1. void HandGestureDialog::on_pushButton_StartTrain_clicked()2. {3. QProgressDialog* process = new QProgressDialog(this);4. process->setWindowTitle ("Traning Model");5. process->setLabelText("Processing...");6. process->setModal(true);7. process->show ();8. 
gesture.setMainUIPointer (this);9. gesture.Train(process);10. QMessageBox::about (this,tr("完成"),tr("⼿势训练模型完成"));11. }[cpp]1. void CAIGesture::Train(QProgressDialog *pBar)//对指定训练⽂件夹⾥⾯的所有⼿势进⾏训练2. {3. QString curStr = QDir::currentPath ();4. QString fp1 = "InfoDoc/gestureFeatureFile.yml";5. fp1 = curStr + "/" + fp1;6. CvFileStorage *GestureFeature=cvOpenFileStorage(fp1.toStdString ().c_str (),0,CV_STORAGE_WRITE);7. FILE* fp;8. QString fp2 = "InfoDoc/gestureFile.txt";9. fp2 = curStr + "/" + fp2;10. fp=fopen(fp2.toStdString ().c_str (),"w");11. int FolderCount=0;12.13. /*获取当前的⽬录,然后得到当前的⼦⽬录*/14. QString trainStr = curStr;15. trainStr += "/TraningSample/";16. QDir trainDir(trainStr);17. GestureStruct gesture;18. QFileInfoList list = trainDir.entryInfoList();19.20. pBar->setRange(0,list.size ()-2);21.22.23. for(int i=2;i<list.size ();i++)24. {25. pBar->setValue(i-1);26.27. QFileInfo fileInfo = list.at (i);28. if(fileInfo.isDir () == true)29. {30. FolderCount++;31.32. QString tempStr = fileInfo.fileName ();33. fprintf(fp,"%s\n",tempStr.toStdString ().c_str ());34. gesture.angleName = tempStr.toStdString ()+"angleName";35. gesture.anglechaName = tempStr.toStdString ()+"anglechaName";36. gesture.countName = tempStr.toStdString ()+"anglecountName";37.38. tempStr = trainStr + tempStr + "/";39. QDir subDir(tempStr);40. OneGestureTrain(subDir,GestureFeature,gesture);41. }42. }43. pBar->autoClose ();44. delete pBar;45. pBar = NULL;46. fprintf(fp,"%s%d","Hand Gesture Number: ",FolderCount);47. fclose(fp);48. cvReleaseFileStorage(&GestureFeature);49. }[cpp]1. void CAIGesture::OneGestureTrain(QDir GestureDir,CvFileStorage *fs,GestureStruct gesture)//对单张图⽚进⾏训练2. {3. IplImage* TrainImage=0;4. IplImage* dst=0;5. CvSeq* contour=NULL;6. CvMemStorage* storage;7. storage = cvCreateMemStorage(0);8. CvPoint center=cvPoint(0,0);9. float radius=0.0;10. float angle[FeatureNum][10]={0},anglecha[FeatureNum][10]={0},anglesum[FeatureNum][10]={0},anglechasum[FeatureNum][10]={0};11. 
float count[FeatureNum]={0},countsum[FeatureNum]={0};12.13. int FileCount=0;14. /*读取该⽬录下的所有jpg⽂件*/15. QFileInfoList list = GestureDir.entryInfoList();16. QString currentDirPath = GestureDir.absolutePath ();17. currentDirPath += "/";18. for(int k=2;k<list.size ();k++)19. {20. QFileInfo tempInfo = list.at (k);21. if(tempInfo.isFile () == true)22. {23. QString fileNamePath = currentDirPath + tempInfo.fileName ();24. TrainImage=cvLoadImage(fileNamePath.toStdString ().c_str(),1);25. if(TrainImage==NULL)26. {27. cout << "can't load image" << endl;28. cvReleaseMemStorage(&storage);29. cvReleaseImage(&dst);30. cvReleaseImage(&TrainImage);31. return;32. }33. if(dst==NULL&&TrainImage!=NULL)34. dst=cvCreateImage(cvGetSize(TrainImage),8,1);35. SkinDetect(TrainImage,dst);36. FindBigContour(dst,contour,storage);37. cvZero(dst);38. cvDrawContours( dst, contour, CV_RGB(255,255,255),CV_RGB(255,255,255), -1, -1, 8 );39. ComputeCenter(contour,center,radius);40.41. GetFeature(dst,center,radius,angle,anglecha,count);42. for(int j=0;j<FeatureNum;j++)43. {44. countsum[j]+=count[j];45. for(int k=0;k<10;k++)46. {47. anglesum[j][k]+=angle[j][k];48. anglechasum[j][k]+=anglecha[j][k];49. }50. }51. FileCount++;52. cvReleaseImage(&TrainImage);53. }54. }55. for(int i=0;i<FeatureNum;i++)56. {57. gesture.count[i]=countsum[i]/FileCount;58. for(int j=0;j<10;j++)59. {60. gesture.angle[i][j]=anglesum[i][j]/FileCount;61. gesture.anglecha[i][j]=anglechasum[i][j]/FileCount;62. }63. }64. cvStartWriteStruct(fs,gesture.angleName.c_str (),CV_NODE_SEQ,NULL);//开始写⼊yml⽂件65.66. int i=0;67. for(i=0;i<FeatureNum;i++)68. cvWriteRawData(fs,&gesture.angle[i][0],10,"f");//写⼊肤⾊⾓度的值69.70. cvEndWriteStruct(fs);71. cvStartWriteStruct(fs,gesture.anglechaName.c_str (),CV_NODE_SEQ,NULL);72.73. for(i=0;i<FeatureNum;i++)74. cvWriteRawData(fs,&gesture.anglecha[i][0],10,"f");//写⼊⾮肤⾊⾓度的值75.76. cvEndWriteStruct(fs);77. cvStartWriteStruct(fs,gesture.countName.c_str (),CV_NODE_SEQ,NULL);78. 
cvWriteRawData(fs,&gesture.count[0],FeatureNum,"f");//写⼊肤⾊⾓度的个数79. cvEndWriteStruct(fs);80.81. cvReleaseMemStorage(&storage);82. cvReleaseImage(&dst);83. }[cpp]1. void CAIGesture::SkinDetect(IplImage* src,IplImage* dst)2. {3. IplImage* hsv = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);//use to split to HSV4. IplImage* tmpH1 = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);//Use To Skin Detect5. IplImage* tmpS1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);6. IplImage* tmpH2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);7. IplImage* tmpS3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);8. IplImage* tmpH3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);9. IplImage* tmpS2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);10. IplImage* H = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);11. IplImage* S = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);12. IplImage* V = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 1);13. IplImage* src_tmp1=cvCreateImage(cvGetSize(src),8,3);14.15. cvSmooth(src,src_tmp1,CV_GAUSSIAN,3,3); //Gaussian Blur16. cvCvtColor(src_tmp1, hsv, CV_BGR2HSV );//Color Space to Convert17. cvCvtPixToPlane(hsv,H,S,V,0);//To Split 3 channel18.19. /*********************Skin Detect**************/20. cvInRangeS(H,cvScalar(0.0,0.0,0,0),cvScalar(20.0,0.0,0,0),tmpH1);21. cvInRangeS(S,cvScalar(75.0,0.0,0,0),cvScalar(200.0,0.0,0,0),tmpS1);22. cvAnd(tmpH1,tmpS1,tmpH1,0);23.24. // Red Hue with Low Saturation25. // Hue 0 to 26 degree and Sat 20 to 9026. cvInRangeS(H,cvScalar(0.0,0.0,0,0),cvScalar(13.0,0.0,0,0),tmpH2);27. cvInRangeS(S,cvScalar(20.0,0.0,0,0),cvScalar(90.0,0.0,0,0),tmpS2);28. cvAnd(tmpH2,tmpS2,tmpH2,0);29.30. // Red Hue to Pink with Low Saturation31. // Hue 340 to 360 degree and Sat 15 to 9032. cvInRangeS(H,cvScalar(170.0,0.0,0,0),cvScalar(180.0,0.0,0,0),tmpH3);33. cvInRangeS(S,cvScalar(15.0,0.0,0,0),cvScalar(90.,0.0,0,0),tmpS3);34. cvAnd(tmpH3,tmpS3,tmpH3,0);35.36. // Combine the Hue and Sat detections37. cvOr(tmpH3,tmpH2,tmpH2,0);38. 
cvOr(tmpH1,tmpH2,tmpH1,0);39.40. cvCopy(tmpH1,dst);41.42. cvReleaseImage(&hsv);43. cvReleaseImage(&tmpH1);44. cvReleaseImage(&tmpS1);45. cvReleaseImage(&tmpH2);46. cvReleaseImage(&tmpS2);47. cvReleaseImage(&tmpH3);48. cvReleaseImage(&tmpS3);49. cvReleaseImage(&H);50. cvReleaseImage(&S);51. cvReleaseImage(&V);52. cvReleaseImage(&src_tmp1);53. }[cpp]1. //To Find The biggest Countour2. void CAIGesture::FindBigContour(IplImage* src,CvSeq* (&contour),CvMemStorage* storage)3. {4. CvSeq* contour_tmp,*contourPos;5. int contourcount=cvFindContours(src, storage, &contour_tmp, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE );6. if(contourcount==0)7. return;8. CvRect bndRect = cvRect(0,0,0,0);9. double contourArea,maxcontArea=0;10. for( ; contour_tmp != 0; contour_tmp = contour_tmp->h_next )11. {12. bndRect = cvBoundingRect( contour_tmp, 0 );13. contourArea=bndRect.width*bndRect.height;14. if(contourArea>=maxcontArea)//find Biggest Countour15. {16. maxcontArea=contourArea;17. contourPos=contour_tmp;18. }19. }20. contour=contourPos;21. }[cpp]1. //Calculate The Center2. void CAIGesture::ComputeCenter(CvSeq* (&contour),CvPoint& center,float& radius)3. {4. CvMoments m;5. double M00,X,Y;6. cvMoments(contour,&m,0);7. M00=cvGetSpatialMoment(&m,0,0);8. X=cvGetSpatialMoment(&m,1,0)/M00;9. Y=cvGetSpatialMoment(&m,0,1)/M00;10.11. center.x=(int)X;12. center.y=(int)Y;13.14. /*******************tO find radius**********************/15. int hullcount;16. CvSeq* hull;17. CvPoint pt;18. double tmpr1,r=0;19. hull=cvConvexHull2(contour,0,CV_COUNTER_CLOCKWISE,0);20. hullcount=hull->total;21. for(int i=1;i<hullcount;i++)22. {23. pt=**CV_GET_SEQ_ELEM(CvPoint*,hull,i);//get each point24. tmpr1=sqrt((double)((center.x-pt.x)*(center.x-pt.x))+(double)((center.y-pt.y)*(center.y-pt.y)));//计算与中⼼点的⼤⼩25. if(tmpr1>r)//as the max radius26. r=tmpr1;27. }28. radius=r;29. }[cpp]1. void CAIGesture::GetFeature(IplImage* src,CvPoint& center,float radius,2. float angle[FeatureNum][10],3. 
float anglecha[FeatureNum][10],4. float count[FeatureNum])5. {6. int width=src->width;7. int height=src->height;8. int step=src->widthStep/sizeof(uchar);9. uchar* data=(uchar*)src->imageData;10.11. float R=0.0;12. int a1,b1,x1,y1,a2,b2,x2,y2;//the distance of the center to other point13. float angle1_tmp[200]={0},angle2_tmp[200]={0},angle1[50]={0},angle2[50]={0};//temp instance to calculate angule14. int angle1_tmp_count=0,angle2_tmp_count=0,angle1count=0,angle2count=0,anglecount=0;15.16. for(int i=0;i<FeatureNum;i++)//分FeatureNum层进⾏特征提取(也就是5层)分析17. {18. R=(i+4)*radius/9;19. for(int j=0;j<=3600;j++)20. {21. if(j<=900)22. {23. a1=(int)(R*sin(j*3.14/1800));//这个要⾃⼰实际画⼀张图就明⽩了24. b1=(int)(R*cos(j*3.14/1800));25. x1=center.x-b1;26. y1=center.y-a1;27. a2=(int)(R*sin((j+1)*3.14/1800));28. b2=(int)(R*cos((j+1)*3.14/1800));29. x2=center.x-b2;30. y2=center.y-a2;31. }32. else33. {34. if(j>900&&j<=1800)35. {36. a1=(int)(R*sin((j-900)*3.14/1800));37. b1=(int)(R*cos((j-900)*3.14/1800));38. x1=center.x+a1;39. y1=center.y-b1;40. a2=(int)(R*sin((j+1-900)*3.14/1800));41. b2=(int)(R*cos((j+1-900)*3.14/1800));42. x2=center.x+a2;43. y2=center.y-b2;44. }45. else46. {47. if(j>1800&&j<2700)48. {49. a1=(int)(R*sin((j-1800)*3.14/1800));50. b1=(int)(R*cos((j-1800)*3.14/1800));51. x1=center.x+b1;52. y1=center.y+a1;53. a2=(int)(R*sin((j+1-1800)*3.14/1800));54. b2=(int)(R*cos((j+1-1800)*3.14/1800));55. x2=center.x+b2;56. y2=center.y+a2;57. }58. else59. {60. a1=(int)(R*sin((j-2700)*3.14/1800));61. b1=(int)(R*cos((j-2700)*3.14/1800));62. x1=center.x-a1;63. y1=center.y+b1;64. a2=(int)(R*sin((j+1-2700)*3.14/1800));65. b2=(int)(R*cos((j+1-2700)*3.14/1800));66. x2=center.x-a2;67. y2=center.y+b2;68. }69. }70. }71.72. if(x1>0&&x1<width&&x2>0&&x2<width&&y1>0&&y1<height&&y2>0&&y2<height)73. {74. if((int)data[y1*step+x1]==255&&(int)data[y2*step+x2]==0)75. {76. angle1_tmp[angle1_tmp_count]=(float)(j*0.1);//从肤⾊到⾮肤⾊的⾓度77. angle1_tmp_count++;78. }79. 
else if((int)data[y1*step+x1]==0&&(int)data[y2*step+x2]==255)80. {81. angle2_tmp[angle2_tmp_count]=(float)(j*0.1);//从⾮肤⾊到肤⾊的⾓度82. angle2_tmp_count++;83. }84. }85. }86. int j=0;87. for(j=0;j<angle1_tmp_count;j++)88. {89. if(angle1_tmp[j]-angle1_tmp[j-1]<0.2)//忽略太⼩的⾓度90. continue;91. angle1[angle1count]=angle1_tmp[j];92. angle1count++;93. }94.95. for(j=0;j<angle2_tmp_count;j++)96. {97. if(angle2_tmp[j]-angle2_tmp[j-1]<0.2)98. continue;99. angle2[angle2count]=angle2_tmp[j];100. angle2count++;101. }102.103. for(j=0;j<max(angle1count,angle2count);j++)104. {105. if(angle1[0]>angle2[0])106. {107. if(angle1[j]-angle2[j]<7)//忽略⼩于7度的⾓度,因为⼈的⼿指⼀般都⼤于这个值108. continue;109. angle[i][anglecount]=(float)((angle1[j]-angle2[j])*0.01);//肤⾊的⾓度110. anglecha[i][anglecount]=(float)((angle2[j+1]-angle1[j])*0.01);//⾮肤⾊的⾓度,例如⼿指间的⾓度111. anglecount++;112. }113. else114. {115. if(angle1[j+1]-angle2[j]<7)116. continue;117. anglecount++;118. angle[i][anglecount]=(float)((angle1[j+1]-angle2[j])*0.01);119. anglecha[i][anglecount]=(float)((angle2[j]-angle1[j])*0.01);120. }121. }122.123. if(angle1[0]<angle2[0])124. angle[i][0]=(float)((angle1[0]+360-angle2[angle2count-1])*0.01);125. else126. anglecha[i][0]=(float)((angle2[0]+360-angle1[angle1count-1])*0.01);127.128. count[i]=(float)anglecount;129. angle1_tmp_count=0,angle2_tmp_count=0,angle1count=0,angle2count=0,anglecount=0;130. for(j=0;j<200;j++)131. {132. angle1_tmp[j]=0;133. angle2_tmp[j]=0;134. }135. for(j=0;j<50;j++)136. {137. angle1[j]=0;138. angle2[j]=0;139. }140. }141. }基本上对于⾃⼰使⽤代码创建的训练库的特征提取函数和基本的肤⾊检测和连通域的检测的函数的核⼼代码都已经贴到上⾯去了。
基于OpenCV的运动目标检测与跟踪

摘要:运动目标检测与跟踪在计算机视觉和图像处理领域中具有重要的应用价值。
它可以应用于视频监控、自动驾驶、行人识别等多个领域。
本文将介绍如何使用OpenCV库实现运动目标的检测与跟踪,并通过实例演示其应用。
其中包括运动物体检测、运动轨迹跟踪和背景建模等关键技术。
通过对运动目标的检测和跟踪,可以提供实时的监控和追踪能力,为各种应用场景提供技术支持。
1. 引言运动目标检测与跟踪是计算机视觉领域的一个重要研究方向,它的核心任务是从图像序列中提取有意义的运动目标,并对其进行跟踪和分析。
运动目标检测与跟踪在实际应用中有着广泛的需求和应用场景。
例如,在视频监控系统中,可以通过运动目标的检测和跟踪来提供实时的监控和报警能力。
在自动驾驶系统中,可以通过识别和跟踪其他车辆和行人来实现智能的行车决策。
因此,研究和实现高效准确的运动目标检测与跟踪技术对于提升计算机视觉系统的性能和可靠性具有重要意义。
2. 基于OpenCV的运动目标检测与跟踪方法2.1 运动物体检测运动物体检测是运动目标检测与跟踪的第一步,其目标是从图像序列中分离出具有运动的物体。
在OpenCV中,可以使用背景差分法实现运动物体的检测。
背景差分法基于假设每一帧图像中静止部分为背景,通过对当前帧图像与历史帧图像之间的差异进行比较,提取出具有运动的前景物体。
这种方法简单有效,在实际应用中具有广泛的应用场景。
2.2 运动轨迹跟踪运动轨迹跟踪是对运动目标进行持续追踪的技术,其目标是实时获取目标物体在图像序列中的位置和运动情况。
在OpenCV中,可以使用卡尔曼滤波器实现运动轨迹的跟踪。
卡尔曼滤波器是一种能够根据过去的位置和速度信息来预测当前物体位置的滤波器。
通过不断更新目标物体的位置和速度信息,可以实现准确的运动轨迹跟踪。
2.3 背景建模背景建模是用于建立背景模型的方法,用于对比和识别运动目标。
《2024年基于OpenCV的运动目标检测与跟踪》范文

《基于OpenCV的运动目标检测与跟踪》篇一一、引言运动目标检测与跟踪作为计算机视觉的重要研究领域,其广泛应用于视频监控、智能交通、人机交互等多个领域。
随着计算机视觉技术的不断发展,基于OpenCV的运动目标检测与跟踪技术因其高效、准确的特点,逐渐成为研究热点。
本文旨在介绍基于OpenCV的运动目标检测与跟踪方法,分析其原理、应用及优化方法,以提高目标检测与跟踪的准确性和实时性。
二、OpenCV简介OpenCV(Open Source Computer Vision Library)是一个开源的计算机视觉和机器学习软件库,包含了大量用于图像处理和计算机视觉的算法。
OpenCV提供了丰富的API接口,方便开发者快速实现各种计算机视觉算法。
在运动目标检测与跟踪方面,OpenCV提供了多种方法,如背景减除法、光流法、特征匹配法等。
三、运动目标检测运动目标检测是从视频序列中提取出运动目标的过程。
基于OpenCV的运动目标检测方法主要包括背景减除法和帧间差分法。
1. 背景减除法:通过将当前帧与背景帧进行差分,得到前景目标。
该方法可以有效地提取出运动目标,但对背景的更新和模型的适应性要求较高。
OpenCV提供了多种背景减除算法,如MOG2、KNN等。
2. 帧间差分法:通过比较相邻两帧的差异来检测运动目标。
该方法对光照变化和背景干扰具有一定的鲁棒性,但可能会产生“鬼影”现象。
四、运动目标跟踪运动目标跟踪是在检测出运动目标的基础上,对目标进行持续跟踪的过程。
基于OpenCV的运动目标跟踪方法主要包括特征匹配法和光流法。
1. 特征匹配法:通过提取目标的特征,在后续帧中寻找与该特征相似的区域来实现跟踪。
该方法对目标的形变和部分遮挡具有一定的鲁棒性,但当目标与周围环境相似时,容易产生误匹配。
2. 光流法:利用光流信息来实现目标的跟踪。
光流表示了图像中像素点的运动信息,通过计算相邻帧的光流场,可以估计出目标的运动轨迹。
C++基于OpenCV实现手势识别的源码

C++基于OpenCV实现手势识别的源码。先给大家上效果图,源码在下面。使用 RGB 值分割手部区域,即手部的 RGB 值将与背景不同;或者使用边缘检测,或者背景减法。
我这里使用了背景减法模型。
OpenCV为我们提供了不同的背景减法模型,其中 codebook 的作用是对某些帧进行一段时间的精确校准。
对于它获取的所有图像,它会计算每个像素的平均值和偏差,并相应地指定码本框。
在前景中它就像一个黑白图像,只有手是白色的。用 Convex Hull(凸包)来找到指尖。
Convex hull 基本上是包围手部区域的凸集。
包围手的红线是凸包。
基本上它是一个凸集;如果我们在红色区域内取任意两点并将它们连接起来形成一条线,那么这条线就完全位于集合内。
黄点是缺陷点(defect point),会有很多这样的缺陷点,即每个指谷都有一个缺陷点。
现在根据缺陷点的数量,我们可以计算展开的手指数量。
大概流程就是:手部区域提取是使用背景减法完成的。
对于指尖点和指谷深度点,则通过凸度缺陷(convexity defects)来获取。
提取轮廓和检测凸点的主要代码在函数 void detect(IplImage* img_8uc1, IplImage* img_8uc3) 中;将相机放在稳定的背景前;运行代码,等待一段时间。
校准完成后。
你会看到显示一些干扰的连通组件图像。
把你的手放在相机视图中。
没什么好说的直接看代码会⽐较容易理解核⼼代码int main(int argc, char** argv){const char* filename = 0;IplImage* rawImage = 0, *yuvImage = 0;IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;CvCapture* capture = 0;int c, n, nframes = 0;int nframesToLearnBG = 300;model = cvCreateBGCodeBookModel();model->modMin[0] = 3;model->modMin[1] = model->modMin[2] = 3;model->modMax[0] = 10;model->modMax[1] = model->modMax[2] = 10;model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;bool pause = false;bool singlestep = false;for( n = 1; n < argc; n++ ){static const char* nframesOpt = "--nframes=";if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 ){if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 ){help();return -1;}}elsefilename = argv[n];}if( !filename ){printf("Capture from camera\n");capture = cvCaptureFromCAM( 0 );}else{printf("Capture from file %s\n",filename);capture = cvCreateFileCapture( filename );}if( !capture ){printf( "Can not initialize video capturing\n\n" );help();return -1;}for(;;){if( !pause ){rawImage = cvQueryFrame( capture );++nframes;if(!rawImage)break;}if( singlestep )pause = true;if( nframes == 1 && rawImage ){// CODEBOOK METHOD ALLOCATIONyuvImage = cvCloneImage(rawImage);ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 ); ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 ); cvSet(ImaskCodeBook,cvScalar(255));cvNamedWindow( "Raw", 1 );cvNamedWindow( "ForegroundCodeBook",1);cvNamedWindow( "CodeBook_ConnectComp",1);}if( rawImage ){cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );if( !pause && nframes-1 < nframesToLearnBG )cvBGCodeBookUpdate( model, yuvImage );if( nframes-1 == nframesToLearnBG )cvBGCodeBookClearStale( model, model->t/2 );if( nframes-1 >= nframesToLearnBG ){cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );centers if desiredcvCopy(ImaskCodeBook,ImaskCodeBookCC);cvSegmentFGMask( ImaskCodeBookCC );cvShowImage( 
"CodeBook_ConnectComp",ImaskCodeBookCC);detect(ImaskCodeBookCC,rawImage);}cvShowImage( "Raw", rawImage );cvShowImage( "ForegroundCodeBook",ImaskCodeBook);}c = cvWaitKey(10)&0xFF;c = tolower(c);if(c == 27 || c == 'q')break;switch( c ){case 'h':help();break;case 'p':pause = !pause;break;case 's':singlestep = !singlestep;pause = false;break;case 'r':pause = false;singlestep = false;break;case ' ':cvBGCodeBookClearStale( model, 0 );nframes = 0;break;case 'y': case '0':case 'u': case '1':case 'v': case '2':case 'a': case '3':case 'b':ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2] );break;case 'i':case 'o':case 'k':case 'l':{uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin;for(n=0; n<NCHANNELS; n++){if( ch[n] ){int v = ptr[n] + (c == 'i' || c == 'l' ? 1 : -1);ptr[n] = CV_CAST_8U(v);}printf("%d,", ptr[n]);}printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low" );}break;}}cvReleaseCapture( &capture );cvDestroyWindow( "Raw" );cvDestroyWindow( "ForegroundCodeBook");cvDestroyWindow( "CodeBook_ConnectComp");return 0;}要直接跑代码调试的,可以直接去下载到此这篇关于C++基于OpenCV实现⼿势识别的源码的⽂章就介绍到这了,更多相关OpenCV⼿势识别内容请搜索以前的⽂章或继续浏览下⾯的相关⽂章希望⼤家以后多多⽀持!。
OpenCV+python手势识别框架和实例讲解

OpenCV+python手势识别框架和实例讲解:基于OpenCV2.4.8和 python 2.7实现简单的手势识别。
以下为基本步骤1.去除背景,提取⼿的轮廓2. RGB->YUV,同时计算直⽅图3.进⾏形态学滤波,提取感兴趣的区域4.找到⼆值化的图像轮廓5.找到最⼤的⼿型轮廓6.找到⼿型轮廓的凸包7.标记⼿指和⼿掌8.把提取的特征点和⼿势字典中的进⾏⽐对,然后判断⼿势和形状提取⼿的轮廓 cv2.findContours()找到最⼤凸包cv2.convexHull(),然后找到⼿掌和⼿指的相对位置,定位⼿型的轮廓和关键点,包括⼿掌的中⼼,⼿指的相对位置特征字典主要包括以下⼏个⽅⾯:名字,⼿掌中⼼点,⼿掌的直径,⼿指的坐标点,⼿指的个数,每个⼿指之间的⾓度例如:# BEGIN ------------------------------------#V=gesture("V")V.set_palm((475,225),45)V.set_finger_pos([(490,90),(415,105)])V.calc_angles()dict[V.getname()]=V# END --------------------------------------#最终的识别结果如下:⽰例代码frame=hand_threshold(fg_frame,hand_histogram)contour_frame=np.copy(frame)contours,hierarchy=cv2.findContours(contour_frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)found,hand_contour=hand_contour_find(contours)if(found):hand_convex_hull=cv2.convexHull(hand_contour)frame,hand_center,hand_radius,hand_size_score=mark_hand_center(frame_original,hand_contour)if(hand_size_score):frame,finger,palm=mark_fingers(frame,hand_convex_hull,hand_center,hand_radius)frame,gesture_found=find_gesture(frame,finger,palm)else:frame=frame_original以上这篇OpenCV+python⼿势识别框架和实例讲解就是⼩编分享给⼤家的全部内容了,希望能给⼤家⼀个参考,也希望⼤家多多⽀持。
OpenCv手势识别

应用OpenCv实现手势识别第一部分环境搭建与算法设计一、环境搭建:1.分别安装Visual Studio 2008和opencv-2.1.0-win32-vs2008,安装OpenCV的过程中注意选择添加环境变量,把“add it to your Current User PATH “前的复选框勾上 .2. 包含相关的库文件、头文件和源文件,过程如下:工具->选项->项目和解决方案->vc++目录,“显示以下内容的目录”下拉列表中选择“包含文件”,添加条目“D:\Program Files\OpenCV2.1\include\opencv”;“显示以下内容的目录”下拉列表中选择“库文件” ,添加条目“D:\Program Files\OpenCV2.1\lib”;"显示以下内容的目录"下拉列表选择“源文件”,添加条目”D:\Program Files\OpenCV2.1\src\cv” ,”D:\Program Files\OpenCV2.1\src\cvaux” ,”D:\Program Files\OpenCV2.1\src\cxcore” ,”D:\Program Files\OpenCV2.1\src\highgui”。
点击“确定”。
3.建立工程与配置工程:新建工程(或者叫解决方案),在解决方案资源管理器中右键点击项目名称opencvhello,选择“属性”,在“配置(C)”下拉列表中选择Debug, 然后“配置属性”->“链接器”->“输入”->附加依赖项,添加 cxcore210d.lib、cv210d.lib、highgui210d.lib;在“配置(C)”下拉列表中选择Release, 然后“配置属性”->“链接器”->“输入”->附加依赖项,添加 cxcore210.lib、cv210.lib、highgui210.lib(注意 Release 配置链接的是不带 d 后缀的库)。中间如果提示要保存的话,就保存。
手部位置跟踪opencv程序

* HandVu - a library for computer vision-based hand gesture* recognition.* Copyright (C) 2004 Mathias Kolsch, matz@** This program is free software; you can redistribute it and/or* modify it under the terms of the GNU General Public License* as published by the Free Software Foundation; either version 2* of the License, or (at your option) any later version.** This program is distributed in the hope that it will be useful,* but WITHOUT ANY WARRANTY; without even the implied warranty of* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the* GNU General Public License for more details.** You should have received a copy of the GNU General Public License* along with this program; if not, write to the Free Software* Foundation, Inc., 59 Temple Place - Suite 330,* Boston, MA 02111-1307, USA.** $Id: hv_OpenCV.cpp,v 1.15 2006/01/03 21:44:15 matz Exp $**/#ifdef WIN32#include<windows.h>#endif#include<stdio.h>#include<cv.h>#include<highgui.h>#include<ctype.h>#include<time.h>#include"HandVu.h"IplImage *capture_image = 0;IplImage *display_image = 0;bool async_processing = false;int num_async_bufs = 30;IplImage *m_async_image = 0;int m_async_bufID = -1;bool sync_display = true;CvPoint origin;int select_object = 0;int sel_area_left=0, sel_area_top=0, sel_area_right=0, sel_area_bottom=0; bool correct_distortion = false;void OnMouse( int event, int x, int y, int/*flags*/, void* /*params*/ ) {if( !capture_image )return;if( capture_image->origin )y = capture_image->height - y;if( select_object ){sel_area_left = MIN(x,origin.x);sel_area_top = MIN(y,origin.y);sel_area_right = sel_area_left + CV_IABS(x - origin.x);sel_area_bottom = sel_area_top + CV_IABS(y - origin.y);sel_area_left = MAX( sel_area_left, 0 );sel_area_top = MAX( sel_area_top, 0 );sel_area_right = MIN( sel_area_right, capture_image->width );sel_area_bottom = MIN( sel_area_bottom, capture_image->height );if( sel_area_right-sel_area_left > 0 && sel_area_bottom-sel_area_top> 0 ) hvSetDetectionArea(sel_area_left, 
sel_area_top,sel_area_right, sel_area_bottom);}switch( event ){case CV_EVENT_LBUTTONDOWN:origin = cvPoint(x,y);sel_area_left = sel_area_right = x;sel_area_top = sel_area_bottom = y;select_object = 1;break;case CV_EVENT_LBUTTONUP:select_object = 0;break;}}void showFrame(IplImage* img, hvAction action){if (action==HV_DROP_FRAME) {// HandVu recommends dropping the frame entirely// printf("HandVuFilter: dropping frame\n");return;} else if (action==HV_SKIP_FRAME) {// HandVu recommends displaying the frame, but not doing any further// processing on it - keep going// printf("HandVuFilter: supposed to skip frame\n");} else if (action==HV_PROCESS_FRAME) {// full processing was done and is recommended for following steps;// keep going//printf("HandVuFilter: processed frame\n");} else {assert(0); // unknown action}hvState state;hvGetState(0, state);cvShowImage( "HandVu", img );}void displayCallback(IplImage* img, hvAction action){if (sync_display) {cvCopy(img, display_image);} else {showFrame(img, action);}}int main( int argc, char** argv ){CvCapture* capture = 0;if (argc<2) {printf("you need to specify a conductor file as first argument\n");printf("for example: ../config/default.conductor\n");return -1;}string conductor_fname(argv[1]);printf("will load conductor from file:\n%s\n", conductor_fname.c_str());if( argc == 2 || argc == 3) {int num = 0;if (argc==3) {num = atoi(argv[2]);}capture = cvCaptureFromCAM( num );if (!capture) {capture = cvCaptureFromAVI( argv[2] );}}if( !capture ){fprintf(stderr,"Could not initialize capturing through OpenCV.\n");return -1;}printf( "Hot keys: \n""\tESC - quit the program\n""\tr - restart the tracking\n""\t0-3 - set the overlay (verbosity) level\n""use the mouse to select the initial detection area\n" );int p = 0; // according to docs, these calls don't work in OpenCV beta 4 yet p = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);p = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);capture_image = cvQueryFrame( 
capture );if ( !capture_image ) {fprintf(stderr,"Could not retrieve image through OpenCV.\n");return -1;}/* allocate all the buffers */CvSize size = cvGetSize(capture_image);hvInitialize(size.width, size.height);hvLoadConductor(conductor_fname);hvStartRecognition();hvSetOverlayLevel(2);if (async_processing) {hvAsyncSetup(num_async_bufs, displayCallback);if (sync_display) display_image = cvCloneImage(capture_image);}cvSetMouseCallback( "HandVu", OnMouse );int success = cvNamedWindow( "HandVu", 1 );if (success!=1) {printf("can't open window - did you compile OpenCV with highgui support?");return -1;}fprintf(stderr, "initialized highgui\n");for (;;) {int c;if (async_processing) {// asynchronous processing in HandVuif (sync_display) cvShowImage("HandVu", display_image);// ------- main library call ---------hvAsyncGetImageBuffer(&m_async_image, &m_async_bufID);cvCopy(capture_image, m_async_image);hvAsyncProcessFrame(m_async_bufID);// -------} else {// synchronous processing in HandVu// ------- main library call ---------hvAction action = HV_INVALID_ACTION;action = hvProcessFrame(capture_image);// -------showFrame(capture_image, action);}c = cvWaitKey(10);if( c == 27 || c == 'q' )break;switch( c ){case'r':hvStopRecognition();hvStartRecognition();break;case'0':hvSetOverlayLevel(0);break;case'1':hvSetOverlayLevel(1);break;case'2':hvSetOverlayLevel(2);break;case'3':hvSetOverlayLevel(3);break;case'u':if (hvCanCorrectDistortion()) {correct_distortion = !correct_distortion;hvCorrectDistortion(correct_distortion);}break;default:;}// capture next imagecapture_image = cvQueryFrame( capture );if ( !capture_image ) {fprintf(stderr,"Could not retrieve image through OpenCV.\n");break;}}cvReleaseCapture( &capture );cvDestroyWindow("HandVu");return 0;}。
基于Opencv的运动目标的检测和跟踪

- 1、下载文档前请自行甄别文档内容的完整性,平台不提供额外的编辑、内容补充、找答案等附加服务。
- 2、"仅部分预览"的文档,不可在线预览部分如存在完整性等问题,可反馈申请退款(可完整预览的文档不适用该条件!)。
- 3、如文档侵犯您的权益,请联系客服反馈,我们会尽快为您处理(人工客服工作时间:9:00-18:30)。
#include <Windows.h>

#include <cv.h>
#include <cxcore.h>
#include <highgui.h>

#include <fstream>
#include <iostream>
#include <iomanip>
#include <algorithm>

using namespace std;

// Link against the OpenCV 2.1 import libraries (MSVC-specific pragmas).
#pragma comment(lib,"cv210.lib")
#pragma comment(lib,"cxcore210.lib")
#pragma comment(lib,"highgui210.lib")

// Print a message and terminate the process.
// NOTE(review): exits with status 0 even though it reports an error; a
// nonzero exit code would be conventional -- confirm before changing.
void ErrorHandler(char* message)
{
    cout<<message<<endl;
    exit(0);
}
#undef UNICODE

void fingerTip(char* imgname);

// Enumerate every .bmp file in the ./images directory (Win32 FindFirstFile
// API) and run the fingertip-detection pipeline on each one, pausing for a
// key press between images.
int main()
{
    WIN32_FIND_DATA FileData;
    HANDLE hSearch;
    BOOL fFinished = FALSE;

    if(!SetCurrentDirectory("images")){
        cout<<"failed to change work directory"<<endl;
        exit(0);
    }

    hSearch = FindFirstFile("*.bmp", &FileData);
    if (hSearch == INVALID_HANDLE_VALUE){
        ErrorHandler("No .bmp files found.");
    }
    while (!fFinished){
        fingerTip(FileData.cFileName);
        if (!FindNextFile(hSearch, &FileData)){
            if (GetLastError() ==ERROR_NO_MORE_FILES){
                // normal termination: no more matching files
                fFinished = TRUE;
            } else {
                ErrorHandler("Couldn't find next file.");
            }
        }
        cvWaitKey(0); // block until a key press before the next image
    }

    // Close the search handle.
    if (!FindClose(hSearch)){
        ErrorHandler("Couldn't close search handle.");
    }

    return 0;
}

// Fingertip-candidate detection on a single image:
// grayscale -> Canny edges -> contours -> polygonal approximation ->
// convex hull -> convexity defects. Each intermediate stage is rendered
// into its own window.
void fingerTip(char* imgname)
{
    IplImage* pImgColor=NULL;
    IplImage* pImgGray=NULL;
    IplImage* pImgContourAll=NULL;
    IplImage* pImgContourAppr=NULL;
    IplImage* pImgHull=NULL;
    IplImage* pImgDefects=NULL;

    pImgColor=cvLoadImage(imgname,CV_LOAD_IMAGE_COLOR);
    if (!pImgColor){
        cout<<"failed to load image"<<endl;
        exit(0);
    }

    // NOTE(review): cvLoadImage yields BGR channel order, so CV_BGR2GRAY
    // would be the matching code; CV_RGB2GRAY swaps the R/B weights --
    // confirm whether this is intentional.
    pImgGray=cvCreateImage(cvGetSize(pImgColor),8,1);
    cvCvtColor(pImgColor,pImgGray,CV_RGB2GRAY);

    // 3-channel black canvases for drawing each visualization stage
    pImgContourAppr=cvCreateImage(cvGetSize(pImgGray),8,3);
    pImgContourAll=cvCreateImage(cvGetSize(pImgGray),8,3);
    pImgHull=cvCreateImage(cvGetSize(pImgGray),8,3);
    pImgDefects=cvCreateImage(cvGetSize(pImgGray),8,3);
    cvZero(pImgContourAppr);
    cvZero(pImgContourAll);
    cvZero(pImgHull);
    cvZero(pImgDefects);

    //canny
    CvMemStorage* storage=cvCreateMemStorage();
    // NOTE(review): this cvCreateSeq result is immediately overwritten by
    // cvFindContours below; the initial sequence is dead.
    CvSeq* contourSeqAll=cvCreateSeq(0,sizeof(CvSeq),sizeof(CvPoint),storage);
    cvCanny(pImgGray,pImgGray,10,30,5); // in-place edge detection
    cvFindContours(pImgGray,storage,&contourSeqAll,sizeof(CvContour),CV_RETR_LIST,CV_LINK_RUNS);

    //original contours: walk the h_next-linked list and draw each one
    CvSeq* tseq=contourSeqAll;
    for (;contourSeqAll;contourSeqAll=contourSeqAll->h_next){
        cvDrawContours(pImgContourAll,contourSeqAll,cvScalar(255,0,0),cvScalar(0,0,255),0,2);
    }
    contourSeqAll=tseq; // restore the list head after iterating

    CvMemStorage* storageAppr=cvCreateMemStorage();
    // NOTE(review): same dead-sequence pattern -- cvApproxPoly replaces it.
    CvSeq* contourAppr=cvCreateSeq(0,sizeof(CvSeq),sizeof(CvPoint),storageAppr);
    contourAppr=cvApproxPoly(contourSeqAll,sizeof(CvContour),storageAppr,CV_POLY_APPROX_DP,5,1);

    //approximated contours
    tseq=contourAppr;
    for (;contourAppr;contourAppr=contourAppr->h_next){
        cvDrawContours(pImgContourAppr,contourAppr,cvScalar(255,0,0),cvScalar(0,0,255),0,2);
    }
    contourAppr=tseq;

    //print contours
    /*cout<<"contours:"<<endl;
    for (int i=0;i<contourAppr->total;i++){
        CvPoint* p=(CvPoint*)CV_GET_SEQ_ELEM(CvPoint,contourAppr,i);
        cout<<p->x<<","<<p->y<<endl;
        cvCircle(pImgHull,*p,3,cvScalar(0,255,255));
        cvShowImage("hull",pImgHull);
        cvWaitKey(0);
    }*/

    ////convex hull
    CvSeq* hull=cvConvexHull2(contourAppr);
    //convexity defects
    CvSeq* defectSeq=cvConvexityDefects(contourAppr,hull);
    //rearrange the detectSeq in linked sequence

    // Draw each defect triangle (start/end on the hull, depth point in the
    // valley between fingers) and log the start coordinate.
    for (int i=0;i<defectSeq->total;i++){
        CvConvexityDefect* dp=(CvConvexityDefect*)CV_GET_SEQ_ELEM(CvConvexityDefect,defectSeq,i);

        cvLine(pImgDefects,*(dp->start),*(dp->end),cvScalar(0,0,255));
        cvLine(pImgDefects,*(dp->start),*(dp->depth_point),cvScalar(0x00,0x99,0xff));
        cvLine(pImgDefects,*(dp->depth_point),*(dp->end),cvScalar(0xff,0x99,0x00));
        cvCircle(pImgDefects,*(dp->depth_point),2,cvScalar(0xff,0x99,0x00));
        cout<<i<<" :("<<dp->start->x<<","<<dp->start->y<<")"<<endl;
    }

    // Show every stage, wait for a key, then tear everything down.
    cvShowImage("original",pImgColor);
    cvShowImage("canny",pImgGray);
    cvShowImage("contour all",pImgContourAll);
    cvShowImage("contour appr",pImgContourAppr);
    cvShowImage("ConvexityDefects",pImgDefects);
    //cvShowImage("hull",pImgHull);

    cvWaitKey(0);

    cvDestroyAllWindows();

    cvReleaseImage(&pImgColor);
    cvReleaseImage(&pImgGray);
    cvReleaseImage(&pImgContourAll);
    cvReleaseImage(&pImgContourAppr);
    cvReleaseImage(&pImgDefects);
    cvReleaseImage(&pImgHull);

    cvReleaseMemStorage(&storage);
    cvReleaseMemStorage(&storageAppr);
    // NOTE(review): the closing brace of fingerTip() lies past the end of
    // this chunk in the source document.