Skin Color Detection Code with OpenCV

Skin-Color-Based Face Detection (MATLAB Code)

main

close all; clear all; clc
% Ask for the image name (must be an RGB image; enter 0 to quit)
img_name = input('Enter the image name (must be an RGB image, enter 0 to quit): ','s');
% Stop when 0 is entered
while ~strcmp(img_name,'0')
    % Run face detection
    facedetection(img_name);
    img_name = input('Enter the image name (must be an RGB image, enter 0 to quit): ','s');
end

facedetection

function facedetection(img_name)
% Read the RGB image
I = imread(img_name);
% Convert to grayscale
gray = rgb2gray(I);
% Convert the image to the YCbCr color space
YCbCr = rgb2ycbcr(I);
% Get the image height and width
heigth = size(gray,1);
width = size(gray,2);
% Binarize the image according to the skin-color model
for i = 1:heigth
    for j = 1:width
        Y  = YCbCr(i,j,1);
        Cb = YCbCr(i,j,2);
        Cr = YCbCr(i,j,3);
        if (Y < 80)
            gray(i,j) = 0;
        elseif (skin(Y,Cb,Cr) == 1)
            gray(i,j) = 255;
        else
            gray(i,j) = 0;
        end
    end
end
% Morphological processing of the binary image
SE = strel('arbitrary', eye(5));
% gray = bwmorph(gray,'erode');
% imopen: erosion followed by dilation
gray = imopen(gray,SE);
% imclose: dilation followed by erosion
% gray = imclose(gray,SE);
imshow(gray);
% Get the bounding rectangle of every white region in the image
[L,num] = bwlabel(gray,8);
STATS = regionprops(L,'BoundingBox');
% Holds all rectangles that survive the filtering below
n = 1;
result = zeros(n,4);
figure, imshow(I);
hold on;
for i = 1:num
    box = STATS(i).BoundingBox;
    x = box(1);   % rectangle x coordinate
    y = box(2);   % rectangle y coordinate
    w = box(3);   % rectangle width
    h = box(4);   % rectangle height
    % height-to-width ratio
    ratio = h/w;
    ux = uint8(x);
    uy = uint8(y);
    if ux > 1
        ux = ux - 1;
    end
    if uy > 1
        uy = uy - 1;
    end
    % A rectangle that may contain a face must satisfy:
    % 1. height and width both greater than 20, and area greater than 400
    % 2. height-to-width ratio within the range (0.6, 2)
    % 3. findeye returns 1
    if w < 20 || h < 20 || w*h < 400
        continue
    elseif ratio < 2 && ratio > 0.6 && findeye(gray,ux,uy,w,h) == 1
        % record the candidate face rectangle
        result(n,:) = [ux uy w h];
        n = n + 1;
    end
end
% Mark the candidate face regions
if size(result,1) == 1 && result(1,1) > 0
    rectangle('Position',[result(1,1),result(1,2),result(1,3),result(1,4)],'EdgeColor','r');
else
    % If more than one rectangle qualifies, filter again using other information
    for m = 1:size(result,1)
        m1 = result(m,1);
        m2 = result(m,2);
        m3 = result(m,3);
        m4 = result(m,4);
        % Mark the final face region
        if m1 + m3 < width && m2 + m4 < heigth
            rectangle('Position',[m1,m2,m3,m4],'EdgeColor','r');
        end
    end
end

findeye

% Decide whether a block of the binary image may contain eyes
% bImage -- binary image
% x      -- x coordinate of the rectangle's top-left corner
% y      -- y coordinate of the rectangle's top-left corner
% w      -- rectangle width
% h      -- rectangle height
% Returns eye = 1 if eyes may be present, otherwise 0
function eye = findeye(bImage,x,y,w,h)
% Copy the rectangle region of the binary image
part = zeros(h,w);
% Invert the binarization inside the rectangle
for i = y:(y+h)
    for j = x:(x+w)
        if bImage(i,j) == 0
            part(i-y+1,j-x+1) = 255;
        else
            part(i-y+1,j-x+1) = 0;
        end
    end
end
[L,num] = bwlabel(part,8);
% If the region contains two or more connected components, assume eyes are present
if num < 2
    eye = 0;
else
    eye = 1;
end

skin

% Skin-color model in the YCbCr color space proposed by Anil K. Jain
% Decide from the current pixel's Cb and Cr values whether it is skin
function result = skin(Y,Cb,Cr)
% Parameters
% a = 25.39;
a = 28;
% b = 14.03;
b = 18;
ecx = 1.60;
ecy = 2.41;
sita = 2.53;
cx = 109.38;
cy = 152.02;
xishu = [cos(sita) sin(sita); -sin(sita) cos(sita)];
% If the luminance exceeds 230, enlarge both axes of the ellipse by a factor of 1.1
if (Y > 230)
    a = 1.1*a;
    b = 1.1*b;
end
% Evaluate the elliptical model
Cb = double(Cb);
Cr = double(Cr);
t = [(Cb-cx); (Cr-cy)];
temp = xishu*t;
value = (temp(1) - ecx)^2/a^2 + (temp(2) - ecy)^2/b^2;
% Greater than 1 means not skin (return 0); otherwise skin (return 1)
if value > 1
    result = 0;
else
    result = 1;
end

Exploring OpenCV (Part 27): Skin Detection Techniques

I haven't blogged in a while, because lately I've been busy with projects and competitions. Anyway, today I'll write up a summary of skin detection techniques using OpenCV.

First, here are the mainstream skin detection methods:
1. RGB color space thresholding
2. Otsu thresholding on the Cr channel of YCrCb
3. YCrCb range: 133 <= Cr <= 173 and 77 <= Cb <= 127
4. HSV range: 7 < H < 20, 28 < S < 256, 50 < V < 256
5. Skin detection based on an elliptical skin model
6. OpenCV's built-in skin detection class AdaptiveSkinDetector
Let's implement them one by one. Method 1: RGB-based skin detection. Using the RGB color model, pixels that fall inside a predefined skin-color range are kept; pixels outside the range are set to black.

From the literature, a lot of prior work shows that skin color in the RGB model roughly satisfies the following constraints. Under uniform illumination: R>95 AND G>40 AND B>20 AND MAX(R,G,B)-MIN(R,G,B)>15 AND ABS(R-G)>15 AND R>G AND R>B. Under side lighting: R>220 AND G>210 AND B>170 AND ABS(R-G)<=15 AND R>B AND G>B. With the decision rules fixed, writing the code is straightforward.

/* RGB-range-based skin detection */
Mat RGB_detect(Mat& img)
{
    /*
       R>95 AND G>40 AND B>20 AND MAX(R,G,B)-MIN(R,G,B)>15
       AND ABS(R-G)>15 AND R>G AND R>B
       OR
       R>220 AND G>210 AND B>170 AND ABS(R-G)<=15 AND R>B AND G>B
    */
    Mat detect = img.clone();
    detect.setTo(0);
    if (img.empty() || img.channels() != 3)
    {
        return detect;
    }
    for (int i = 0; i < img.rows; i++)
    {
        for (int j = 0; j < img.cols; j++)
        {
            uchar *p_detect = detect.ptr<uchar>(i, j);
            uchar *p_img = img.ptr<uchar>(i, j);
            if ((p_img[2] > 95 && p_img[1] > 40 && p_img[0] > 20 &&
                 (MAX(p_img[0], MAX(p_img[1], p_img[2])) - MIN(p_img[0], MIN(p_img[1], p_img[2])) > 15) &&
                 abs(p_img[2] - p_img[1]) > 15 && p_img[2] > p_img[1] && p_img[1] > p_img[0]) ||
                (p_img[2] > 220 && p_img[1] > 210 && p_img[0] > 170 &&
                 abs(p_img[2] - p_img[1]) <= 15 && p_img[2] > p_img[0] && p_img[1] > p_img[0]))
            {
                p_detect[0] = p_img[0];
                p_detect[1] = p_img[1];
                p_detect[2] = p_img[2];
            }
        }
    }
    return detect;
}
The detection result is shown below. As you can see, the result is not great: the detected skin regions are incomplete, and skin in slightly poorly lit areas is missed entirely.
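For comparison, method 3 from the list above (the fixed range 133 <= Cr <= 173, 77 <= Cb <= 127 in YCrCb) does not appear in this excerpt; a minimal sketch of it, using cv::inRange and a hypothetical function name YCrCb_range_detect of my own choosing, could look like this:

#include <opencv2/opencv.hpp>
using namespace cv;

// Sketch of method 3: keep pixels with 133 <= Cr <= 173 and 77 <= Cb <= 127
Mat YCrCb_range_detect(const Mat& img)
{
    Mat ycrcb, mask, detect;
    cvtColor(img, ycrcb, COLOR_BGR2YCrCb);   // channel order after conversion: Y, Cr, Cb
    inRange(ycrcb, Scalar(0, 133, 77), Scalar(255, 173, 127), mask);
    img.copyTo(detect, mask);                // keep the original colors inside the mask
    return detect;
}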

Implementing Face Detection with OpenCV

This article shares concrete example code for implementing face detection with OpenCV, for your reference. The details are as follows.

1. Haar cascade detection

#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdlib>
using namespace cv;
using namespace std;

void face_detect_haar();

int main(int argc, char** argv) {
    face_detect_haar();
    waitKey(0);
    return 0;
}

void face_detect_haar() {
    CascadeClassifier faceDetector;
    std::string haar_data_file = "./models/haarcascades/haarcascade_frontalface_alt_tree.xml";
    faceDetector.load(haar_data_file);
    vector<Rect> faces;
    // VideoCapture capture(0);
    VideoCapture capture("./video/test.mp4");
    Mat frame, gray;
    int count = 0;
    while (capture.read(frame)) {
        int64 start = getTickCount();
        if (frame.empty()) {
            break;
        }
        // horizontal mirror
        // flip(frame, frame, 1);
        imshow("input", frame);
        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);
        faceDetector.detectMultiScale(gray, faces, 1.2, 1, 0, Size(30, 30), Size(400, 400));
        for (size_t t = 0; t < faces.size(); t++) {
            count++;
            rectangle(frame, faces[t], Scalar(0, 255, 0), 2, 8, 0);
        }
        float fps = getTickFrequency() / (getTickCount() - start);
        double time = (getTickCount() - start) * 1000.0 / getTickFrequency();
        ostringstream ss;
        ss.str("");
        ss << "FPS: " << fps << " ; inference time: " << time << " ms";
        putText(frame, ss.str(), Point(20, 20), 0, 0.75, Scalar(0, 0, 255), 2, 8);
        imshow("haar_face_detection", frame);
        if (waitKey(1) >= 0) break;
    }
    printf("total face: %d\n", count);
}

2. DNN face detection

#include <opencv2/dnn.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdlib>
using namespace cv;
using namespace cv::dnn;
using namespace std;

const size_t inWidth = 300;
const size_t inHeight = 300;
const double inScaleFactor = 1.0;
const Scalar meanVal(104.0, 177.0, 123.0);
const float confidenceThreshold = 0.7;

void face_detect_dnn();

int main(int argc, char** argv)
{
    face_detect_dnn();
    waitKey(0);
    return 0;
}

void face_detect_dnn() {
    // a TensorFlow model is used here
    std::string modelBinary = "./models/dnn/face_detector/opencv_face_detector_uint8.pb";
    std::string modelDesc = "./models/dnn/face_detector/opencv_face_detector.pbtxt";
    // initialize the network
    dnn::Net net = readNetFromTensorflow(modelBinary, modelDesc);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);
    if (net.empty()) {
        printf("Load models fail...\n");
        return;
    }
    // open the camera or a video file
    // VideoCapture capture(0);
    VideoCapture capture("./video/test.mp4");
    if (!capture.isOpened()) {
        printf("Don't find video...\n");
        return;
    }
    Mat frame;
    int count = 0;
    while (capture.read(frame)) {
        int64 start = getTickCount();
        if (frame.empty()) {
            break;
        }
        // horizontal mirror
        // flip(frame, frame, 1);
        imshow("input", frame);
        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);
        // prepare the input blob
        Mat inputBlob = blobFromImage(frame, inScaleFactor, Size(inWidth, inHeight), meanVal, false, false);
        net.setInput(inputBlob, "data");
        // face detection
        Mat detection = net.forward("detection_out");
        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;
        double time = net.getPerfProfile(layersTimings) / freq;
        Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
        ostringstream ss;
        for (int i = 0; i < detectionMat.rows; i++) {
            // confidence in [0, 1]
            float confidence = detectionMat.at<float>(i, 2);
            if (confidence > confidenceThreshold) {
                count++;
                int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
                int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
                int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
                int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
                Rect object((int)xLeftBottom, (int)yLeftBottom,
                            (int)(xRightTop - xLeftBottom),
                            (int)(yRightTop - yLeftBottom));
                rectangle(frame, object, Scalar(0, 255, 0));
                ss << confidence;
                std::string conf(ss.str());
                std::string label = "Face: " + conf;
                int baseLine = 0;
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
                                      Size(labelSize.width, labelSize.height + baseLine)),
                          Scalar(255, 255, 255), FILLED);
                putText(frame, label, Point(xLeftBottom, yLeftBottom),
                        FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
            }
        }
        float fps = getTickFrequency() / (getTickCount() - start);
        ss.str("");
        ss << "FPS: " << fps << " ; inference time: " << time << " ms";
        putText(frame, ss.str(), Point(20, 20), 0, 0.75, Scalar(0, 0, 255), 2, 8);
        imshow("dnn_face_detection", frame);
        if (waitKey(1) >= 0) break;
    }
    printf("total face: %d\n", count);
}

That is all for this article. I hope it is helpful for your studies, and thank you for your support.

OpenCV Study Notes: An Analysis of the Face Detection Code

1. Prerequisites

1) Dynamic memory storage and its functions: CvMemStorage

typedef struct CvMemStorage
{
    struct CvMemBlock* bottom;   /* first allocated block */
    struct CvMemBlock* top;      /* the current memory block - top of the stack */
    struct CvMemStorage* parent; /* borrows new blocks from */
    int block_size;              /* block size */
    int free_space;              /* free space in the top block (in bytes) */
} CvMemStorage;

A memory storage is a low-level structure used to store dynamically growing data structures such as sequences, contours, graphs and subdivisions.

It consists of a list of memory blocks of equal size: bottom points to the head of the list, and top points to the current block, which is not necessarily the tail. All blocks between bottom and top (including bottom but not top) are completely full; all blocks between top and the tail (excluding top) are empty; and the top block itself is only partially occupied, with free_space giving the number of free bytes left in it.

A newly allocated buffer (whether allocated explicitly with cvMemStorageAlloc or implicitly by higher-level functions such as cvSeqPush or cvGraphAddEdge) always starts in the remaining part of the current (top) block, provided that part is large enough for the requested size.

After the allocation, free_space decreases by the size of the allocated buffer, plus some padding used to keep proper alignment.

When the free space of the top block cannot satisfy the requested buffer size, the next block after top becomes the current block (the new top), and free_space is reset to the full size of that block.
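As a minimal usage sketch of such a storage with the legacy C API (the sequence type, element type and sizes below are my own illustration, not taken from these notes):

#include <opencv2/opencv.hpp>   // OpenCV 2.x headers also expose the legacy C API

int main()
{
    // Create a storage with the default block size (0 selects the default, about 64 KB)
    CvMemStorage* storage = cvCreateMemStorage(0);

    // A sequence of CvPoint that grows inside the storage
    CvSeq* seq = cvCreateSeq(CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage);
    for (int i = 0; i < 1000; i++)
    {
        CvPoint pt = cvPoint(i, 2 * i);
        cvSeqPush(seq, &pt);   // allocates from the top block; new blocks are taken as needed
    }

    // Releasing the storage frees every block and everything allocated from it
    cvReleaseMemStorage(&storage);
    return 0;
}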

Face Detection in OpenCV

As I mentioned before, the moving-object detection work has come to an end, and I have now officially entered a do-nothing phase.

So I might as well keep studying OpenCV.

Here I have to say that Baidu really is a powerful tool.

All of my self-study material for OpenCV comes from the web; the books I borrowed from the library have basically been left in the cold.

Why? User experience.

I have to complain: the theory and formulas in books just don't suit my impatient temperament. Let me instead recommend a few blogs; they should be quite helpful for anyone who wants to learn image processing from the OpenCV angle.

1. /index.php/%e9%a6%96%e9%a1%b5  2. /blog/morewindows/8225783  3. /column/details/opencv-tutorial.html  Some content on these three sites overlaps, but if you read carefully you will find that each blogger has a different focus; after all, everyone runs into different problems while coding.

Alright, let's talk about face detection. To be honest, I don't have a very academic understanding of this technology.

In this project the pipeline is: detect the moving object, extract it, run face detection to decide whether it is a person, and if so, track it and judge whether its behavior is abnormal.

So I do need to understand it.

At first I was quite looking forward to implementing this feature; human appearance is so complex that pinning it down with such an immediate technical test should, I thought, be quite difficult.

As it turns out, the mighty OpenCV drops the difficulty of implementing it to below zero.

That aside, its detection results can really make you laugh and cry at the same time.

Here is a picture.

1. The regions marked by the blue circles are the "faces" the system detected. I was quite surprised when I first saw this result: is this technology really just storing the relative coordinates of facial features and applying a set of thresholds?

Although my guess is a bit of a long shot, it is not baseless.

Earlier I ran the detector on a photo of "Milk Tea Sister" posing cutely; her face was turned sideways, leaning on Dong-ge's shoulder, and that pose probably violated OpenCV's assumptions about the relative layout of facial features, so the poor girl's face was not detected.

(Although beforehand I had assumed the detection failed because she was simply too cute...) Apparently I was overthinking it.

Skin Segmentation Face Detection (MATLAB Code)

image = imread('im.jpg');
figure, imshow(image);
red   = double(image(:,:,1));
green = double(image(:,:,2));
blue  = double(image(:,:,3));
[m n] = size(red);
Y  = zeros(m,n);
Cb = zeros(m,n);
Cr = zeros(m,n);
I  = zeros(m,n);
Q  = zeros(m,n);
red_gama   = zeros(m,n);
green_gama = zeros(m,n);
blue_gama  = zeros(m,n);
% Gamma correction
for i = 1:m
    for j = 1:n
        if red(i,j) > 0 && red(i,j) < 90
            fai = pi*red(i,j)/180;
            gama = 1 + 0.5*cos(fai);
            red_gama(i,j) = 255*(red(i,j)/255)^(1/gama);
        elseif red(i,j) >= 90 && red(i,j) <= 170
            fai = pi/2;
            gama = 1 + 0.5*cos(fai);
            red_gama(i,j) = 255*(red(i,j)/255)^(1/gama);
        elseif red(i,j) > 170 && red(i,j) <= 255
            fai = pi - pi*(255-red(i,j))/170;
            gama = 1 + 0.5*cos(fai);
            red_gama(i,j) = 255*(red(i,j)/255)^(1/gama);
        end
        if green(i,j) > 0 && green(i,j) < 90
            fai = pi*green(i,j)/180;
            gama = 1 + 0.5*cos(fai);
            green_gama(i,j) = 255*(green(i,j)/255)^(1/gama);
        elseif green(i,j) >= 90 && green(i,j) <= 170
            fai = pi/2;
            gama = 1 + 0.5*cos(fai);
            green_gama(i,j) = 255*(green(i,j)/255)^(1/gama);
        elseif green(i,j) > 170 && green(i,j) <= 255
            fai = pi - pi*(255-green(i,j))/170;
            gama = 1 + 0.5*cos(fai);
            green_gama(i,j) = 255*(green(i,j)/255)^(1/gama);
        end
        if blue(i,j) > 0 && blue(i,j) < 90
            fai = pi*blue(i,j)/180;
            gama = 1 + 0.5*cos(fai);
            blue_gama(i,j) = 255*(blue(i,j)/255)^(1/gama);
        elseif blue(i,j) >= 90 && blue(i,j) <= 170
            fai = pi/2;
            gama = 1 + 0.5*cos(fai);
            blue_gama(i,j) = 255*(blue(i,j)/255)^(1/gama);
        elseif blue(i,j) > 170 && blue(i,j) <= 255
            fai = pi - pi*(255-blue(i,j))/170;
            gama = 1 + 0.5*cos(fai);
            blue_gama(i,j) = 255*(blue(i,j)/255)^(1/gama);
        end
    end
end
% Convert the gamma-corrected RGB values to YCbCr
for i = 1:m
    for j = 1:n
        Y(i,j)  =  0.2989*red_gama(i,j) + 0.5866*green_gama(i,j) + 0.1145*blue_gama(i,j);
        Cb(i,j) = -0.1688*red_gama(i,j) - 0.3312*green_gama(i,j) + 0.5000*blue_gama(i,j);
        Cr(i,j) =  0.5000*red_gama(i,j) - 0.4184*green_gama(i,j) - 0.0817*blue_gama(i,j);
    end
end
emp  = zeros(m,n);
sita = zeros(m,n);
% Hue angle of each pixel in the Cb-Cr plane
for i = 1:m
    for j = 1:n
        if Cr(i,j) > 0 && Cb(i,j) > 0
            sita(i,j) = atan(abs(Cr(i,j))/abs(Cb(i,j)))*180/pi;
        elseif Cr(i,j) > 0 && Cb(i,j) < 0
            sita(i,j) = 180 - atan(abs(Cr(i,j))/abs(Cb(i,j)))*180/pi;
        elseif Cr(i,j) < 0 && Cb(i,j) < 0
            sita(i,j) = 180 + atan(abs(Cr(i,j))/abs(Cb(i,j)))*180/pi;
        else
            sita(i,j) = 0;
        end
    end
end
% Keep pixels whose hue angle falls inside the skin range (105, 150)
for i = 1:m
    for j = 1:n
        if sita(i,j) > 105 && sita(i,j) < 150
            emp(i,j) = sita(i,j);
        else
            emp(i,j) = 0;
            Y(i,j) = 0;
        end
    end
end
figure, imshow(emp);
figure, imshow(uint8(Y));

Figures: original image and segmentation results.

Annotated OpenCV Haar Classifier Face Detection Code

cvHaarDetectObjectsForROC

CvSeq*
cvHaarDetectObjectsForROC( const CvArr* _img,
    CvHaarClassifierCascade* cascade, CvMemStorage* storage,
    std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
    double scaleFactor, int minNeighbors, int flags,
    CvSize minSize, CvSize maxSize, bool outputRejectLevels )
{
    const double GROUP_EPS = 0.2;
    CvMat stub, *img = (CvMat*)_img;
    cv::Ptr<CvMat> temp, sum, tilted, sqsum, normImg, sumcanny, imgSmall;
    CvSeq* result_seq = 0;
    cv::Ptr<CvMemStorage> temp_storage;

    cv::ConcurrentRectVector allCandidates;
    std::vector<cv::Rect> rectList;
    std::vector<int> rweights;
    double factor;
    int coi;
    bool doCannyPruning = (flags & CV_HAAR_DO_CANNY_PRUNING) != 0;        // flag: prune flat regions with Canny
    bool findBiggestObject = (flags & CV_HAAR_FIND_BIGGEST_OBJECT) != 0;  // flag: return only the biggest object
    bool roughSearch = (flags & CV_HAAR_DO_ROUGH_SEARCH) != 0;            // flag: only return the first object

    // error checks
    if( !CV_IS_HAAR_CLASSIFIER(cascade) )
        CV_Error( !cascade ? CV_StsNullPtr : CV_StsBadArg, "Invalid classifier cascade" );

    if( !storage )
        CV_Error( CV_StsNullPtr, "Null storage pointer" );

    img = cvGetMat( img, &stub, &coi );
    if( coi )
        CV_Error( CV_BadCOI, "COI is not supported" );

    if( CV_MAT_DEPTH(img->type) != CV_8U )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    if( scaleFactor <= 1 )
        CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );

    // if the find-biggest-object flag is set, remove the SCALE_IMAGE flag
    if( findBiggestObject )
        flags &= ~CV_HAAR_SCALE_IMAGE;

    if( maxSize.height == 0 || maxSize.width == 0 )
    {
        maxSize.height = img->rows;
        maxSize.width = img->cols;
    }

    temp = cvCreateMat( img->rows, img->cols, CV_8UC1 );
    // note: rows+1, cols+1 adds a zero-valued border so the integral image can be computed efficiently
    sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );    // integral image
    sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 );  // integral image of squared values

    if( !cascade->hid_cascade )
        icvCreateHidHaarClassifierCascade(cascade);                 // build the internal cascade representation

    if( cascade->hid_cascade->has_tilted_features )
        tilted = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );  // 45-degree tilted integral image

    result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), storage );  // sequence that will hold the detection results

    if( CV_MAT_CN(img->type) > 1 )
    {
        cvCvtColor( img, temp, CV_BGR2GRAY );   // convert to grayscale
        img = temp;
    }
    // if the find-biggest-object flag is set, also remove SCALE_IMAGE and Canny pruning
    if( findBiggestObject )
        flags &= ~(CV_HAAR_SCALE_IMAGE|CV_HAAR_DO_CANNY_PRUNING);

    // scale the image instead of the detection window (this branch is commented out here)
    /* if( flags & CV_HAAR_SCALE_IMAGE )
    {
        CvSize winSize0 = cascade->orig_window_size;
#ifdef HAVE_IPP
        int use_ipp = cascade->hid_cascade->ipp_stages != 0;

        if( use_ipp )
            normImg = cvCreateMat( img->rows, img->cols, CV_32FC1 );
#endif
        imgSmall = cvCreateMat( img->rows + 1, img->cols + 1, CV_8UC1 );

        for( factor = 1; ; factor *= scaleFactor )
        {
            CvSize winSize = { cvRound(winSize0.width*factor),
                               cvRound(winSize0.height*factor) };
            CvSize sz = { cvRound( img->cols/factor ), cvRound( img->rows/factor ) };
            CvSize sz1 = { sz.width - winSize0.width + 1, sz.height - winSize0.height + 1 };

            CvRect equRect = { icv_object_win_border, icv_object_win_border,
                               winSize0.width - icv_object_win_border*2,
                               winSize0.height - icv_object_win_border*2 };

            CvMat img1, sum1, sqsum1, norm1, tilted1, mask1;
            CvMat* _tilted = 0;

            if( sz1.width <= 0 || sz1.height <= 0 )
                break;
            if( winSize.width > maxSize.width || winSize.height > maxSize.height )
                break;
            if( winSize.width < minSize.width || winSize.height < minSize.height )
                continue;

            img1 = cvMat( sz.height, sz.width, CV_8UC1, imgSmall->data.ptr );
            sum1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, sum->data.ptr );
            sqsum1 = cvMat( sz.height+1, sz.width+1, CV_64FC1, sqsum->data.ptr );
            if( tilted )
            {
                tilted1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, tilted->data.ptr );
                _tilted = &tilted1;
            }
            norm1 = cvMat( sz1.height, sz1.width, CV_32FC1, normImg ? normImg->data.ptr : 0 );
            mask1 = cvMat( sz1.height, sz1.width, CV_8UC1, temp->data.ptr );

            cvResize( img, &img1, CV_INTER_LINEAR );
            cvIntegral( &img1, &sum1, &sqsum1, _tilted );

            int ystep = factor > 2 ? 1 : 2;
#ifdef HAVE_TBB
            const int LOCS_PER_THREAD = 1000;
            int stripCount = ((sz1.width/ystep)*(sz1.height + ystep-1)/ystep + LOCS_PER_THREAD/2)/LOCS_PER_THREAD;
            stripCount = std::min(std::max(stripCount, 1), 100);
#else
            const int stripCount = 1;
#endif

#ifdef HAVE_IPP
            if( use_ipp )
            {
                cv::Mat fsum(sum1.rows, sum1.cols, CV_32F, sum1.data.ptr, sum1.step);
                cv::Mat(&sum1).convertTo(fsum, CV_32F, 1, -(1<<24));
            }
            else
#endif
                cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );

            cv::Mat _norm1(&norm1), _mask1(&mask1);
            cv::parallel_for(cv::BlockedRange(0, stripCount),
                cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
                    (((sz1.height + stripCount - 1)/stripCount + ystep-1)/ystep)*ystep,
                    factor, cv::Mat(&sum1), cv::Mat(&sqsum1), &_norm1, &_mask1,
                    cv::Rect(equRect), allCandidates, rejectLevels, levelWeights, outputRejectLevels));
        }
    }
    else */
    {
        int n_factors = 0;        // number of detection window scales
        cv::Rect scanROI;         // region of interest

        cvIntegral( img, sum, sqsum, tilted );   // compute the integral images

        // Canny pruning requested
        if( doCannyPruning )
        {
            sumcanny = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
            cvCanny( img, temp, 0, 50, 3 );
            cvIntegral( temp, sumcanny );
        }

        // count how many window scales there are, i.e. n_factors
        for( n_factors = 0, factor = 1;
             factor*cascade->orig_window_size.width < img->cols - 10 &&
             factor*cascade->orig_window_size.height < img->rows - 10;
             n_factors++, factor *= scaleFactor )
            ;
        // initialize the window scale factor
        if( findBiggestObject )
        {
            scaleFactor = 1./scaleFactor;
            factor *= scaleFactor;
        }
        else
            factor = 1;

        for( ; n_factors-- > 0; factor *= scaleFactor )
        {
            const double ystep = std::max( 2., factor );
            // detection window size at this scale
            CvSize winSize = { cvRound( cascade->orig_window_size.width * factor ),
                               cvRound( cascade->orig_window_size.height * factor )};
            CvRect equRect = { 0, 0, 0, 0 };
            int *p[4] = {0,0,0,0};
            int *pq[4] = {0,0,0,0};
            int startX = 0, startY = 0;
            int endX = cvRound((img->cols - winSize.width) / ystep);
            int endY = cvRound((img->rows - winSize.height) / ystep);

            // skip window sizes smaller than the minimum size
            if( winSize.width < minSize.width || winSize.height < minSize.height )
            {
                if( findBiggestObject )
                    break;
                continue;
            }

            // pass the integral images and the window scale to the classifier
            cvSetImagesForHaarClassifierCascade( cascade, sum, sqsum, tilted, factor );
            cvZero( temp );

            if( doCannyPruning )
            {
                equRect.x = cvRound(winSize.width*0.15);
                equRect.y = cvRound(winSize.height*0.15);
                equRect.width = cvRound(winSize.width*0.7);
                equRect.height = cvRound(winSize.height*0.7);

                p[0] = (int*)(sumcanny->data.ptr + equRect.y*sumcanny->step) + equRect.x;
                p[1] = (int*)(sumcanny->data.ptr + equRect.y*sumcanny->step)
                            + equRect.x + equRect.width;
                p[2] = (int*)(sumcanny->data.ptr + (equRect.y + equRect.height)*sumcanny->step) + equRect.x;
                p[3] = (int*)(sumcanny->data.ptr + (equRect.y + equRect.height)*sumcanny->step)
                            + equRect.x + equRect.width;

                pq[0] = (int*)(sum->data.ptr + equRect.y*sum->step) + equRect.x;
                pq[1] = (int*)(sum->data.ptr + equRect.y*sum->step)
                             + equRect.x + equRect.width;
                pq[2] = (int*)(sum->data.ptr + (equRect.y + equRect.height)*sum->step) + equRect.x;
                pq[3] = (int*)(sum->data.ptr + (equRect.y + equRect.height)*sum->step)
                             + equRect.x + equRect.width;
            }

            // restrict the scan to the ROI, if any
            if( scanROI.area() > 0 )
            {
                // adjust start_height and stop_height
                startY = cvRound(scanROI.y / ystep);
                endY = cvRound((scanROI.y + scanROI.height - winSize.height) / ystep);

                startX = cvRound(scanROI.x / ystep);
                endX = cvRound((scanROI.x + scanROI.width - winSize.width) / ystep);
            }
            // parallel_for? parallel execution?
            // run the cascade at every position for the current scale
            cv::parallel_for(cv::BlockedRange(startY, endY),
                cv::HaarDetectObjects_ScaleCascade_Invoker(cascade, winSize, cv::Range(startX, endX),
                    ystep, sum->step, (const int**)p,
                    (const int**)pq, allCandidates ));

            if( findBiggestObject && !allCandidates.empty() && scanROI.area() == 0 )
            {
                rectList.resize(allCandidates.size());
                std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

                groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);

                if( !rectList.empty() )
                {
                    size_t i, sz = rectList.size();
                    cv::Rect maxRect;

                    for( i = 0; i < sz; i++ )
                    {
                        if( rectList[i].area() > maxRect.area() )
                            maxRect = rectList[i];
                    }

                    allCandidates.push_back(maxRect);

                    scanROI = maxRect;
                    int dx = cvRound(maxRect.width*GROUP_EPS);
                    int dy = cvRound(maxRect.height*GROUP_EPS);
                    scanROI.x = std::max(scanROI.x - dx, 0);
                    scanROI.y = std::max(scanROI.y - dy, 0);
                    scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
                    scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);

                    double minScale = roughSearch ? 0.6 : 0.4;
                    minSize.width = cvRound(maxRect.width*minScale);
                    minSize.height = cvRound(maxRect.height*minScale);
                }
            }
        }
    }

    rectList.resize(allCandidates.size());
    if(!allCandidates.empty())
        std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());

    // group overlapping detections from neighboring positions
    if( minNeighbors != 0 || findBiggestObject )
    {
        if( outputRejectLevels )
        {
            groupRectangles(rectList, rejectLevels, levelWeights, minNeighbors, GROUP_EPS );
        }
        else
        {
            groupRectangles(rectList, rweights, std::max(minNeighbors, 1), GROUP_EPS);
        }
    }
    else
        rweights.resize(rectList.size(),0);

    // collect the detection results
    if( findBiggestObject && rectList.size() )
    {
        CvAvgComp result_comp = {{0,0,0,0},0};

        for( size_t i = 0; i < rectList.size(); i++ )
        {
            cv::Rect r = rectList[i];
            if( r.area() > cv::Rect(result_comp.rect).area() )
            {
                result_comp.rect = r;
                result_comp.neighbors = rweights[i];
            }
        }
        cvSeqPush( result_seq, &result_comp );
    }
    else
    {
        for( size_t i = 0; i < rectList.size(); i++ )
        {
            CvAvgComp c;
            c.rect = rectList[i];
            c.neighbors = !rweights.empty() ? rweights[i] : 0;
            cvSeqPush( result_seq, &c );
        }
    }

    return result_seq;
}
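For context, cvHaarDetectObjectsForROC is an internal helper; user code normally reaches it through the public C API entry point cvHaarDetectObjects. A minimal calling sketch (the cascade and image paths are placeholders of my own) might look like this:

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>

int main()
{
    // load a trained cascade and an input image (paths are placeholders)
    CvHaarClassifierCascade* cascade =
        (CvHaarClassifierCascade*)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
    IplImage* img = cvLoadImage("test.jpg", CV_LOAD_IMAGE_COLOR);
    if (!cascade || !img) { printf("load failed\n"); return -1; }

    CvMemStorage* storage = cvCreateMemStorage(0);   // detection results are allocated from this storage
    CvSeq* faces = cvHaarDetectObjects(img, cascade, storage,
                                       1.1,               // scaleFactor
                                       3,                 // minNeighbors
                                       CV_HAAR_DO_CANNY_PRUNING,
                                       cvSize(30, 30));   // minSize

    for (int i = 0; i < (faces ? faces->total : 0); i++)
    {
        // each element is a CvAvgComp whose first member is the rectangle
        CvRect* r = (CvRect*)cvGetSeqElem(faces, i);
        cvRectangle(img, cvPoint(r->x, r->y),
                    cvPoint(r->x + r->width, r->y + r->height),
                    cvScalar(0, 255, 0), 2);
    }
    cvSaveImage("result.jpg", img);

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&img);
    return 0;
}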

How to use OpenCV face detection (facedetectyn) in Android Java

Using the OpenCV library for face recognition on the Android platform is a task that involves several steps.

The following is a basic guide to face detection using OpenCV's Java interface.

1. Import the OpenCV library. First, you need to import the OpenCV library into your Android project.

This is usually done by adding a dependency to your build.gradle file.

Make sure you have downloaded and installed the OpenCV Android SDK.

2. Initialize OpenCV. In your application, you need to initialize the OpenCV library.

This is usually done in the onCreate method of the main activity (MainActivity).

You can call OpenCVLoader.initDebug() to initialize the library.

3. Load the face detector. OpenCV offers several face detection methods, the most common being the Haar feature cascade classifier.

You need to load a pre-trained XML file that contains the features and parameters used for face detection.

// CascadeClassifier expects a file-system path; a cascade stored in res/raw
// (for example lbpcascade_frontalface) is usually copied to app-local storage
// first and then loaded from its absolute path.
CascadeClassifier faceDetector = new CascadeClassifier(cascadeFile.getAbsolutePath());
if (faceDetector.empty()) {
    Log.e(TAG, "Failed to load cascade classifier");
    // handle the load failure
}

4. Process the image. Next, obtain an image from the camera or the image gallery and convert it into a format OpenCV can work with (usually a Mat object).

5. Run face detection. Use the loaded face detector to detect faces in the image.

This is usually done by calling the detectMultiScale method.

MatOfRect faceDetections = new MatOfRect();
faceDetector.detectMultiScale(grayImage, faceDetections);

Here grayImage is a grayscale image, because the Haar cascade classifier usually runs faster on grayscale images.

void cvSkinSegment(IplImage* img, IplImage* mask){
    CvSize imageSize = cvSize(img->width, img->height);
    IplImage *imgY  = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
    IplImage *imgCr = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
    IplImage *imgCb = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);

    IplImage *imgYCrCb = cvCreateImage(imageSize, img->depth, img->nChannels);
    cvCvtColor(img, imgYCrCb, CV_BGR2YCrCb);
    cvSplit(imgYCrCb, imgY, imgCr, imgCb, 0);
    int y, cr, cb, l, x1, y1, value;
    unsigned char *pY, *pCr, *pCb, *pMask;

    pY = (unsigned char *)imgY->imageData;
    pCr = (unsigned char *)imgCr->imageData;
    pCb = (unsigned char *)imgCb->imageData;
    pMask = (unsigned char *)mask->imageData;
    cvSetZero(mask);
    l = img->height * img->width;
    for (int i = 0; i < l; i++){
        y = *pY;
        cr = *pCr;
        cb = *pCb;
        cb -= 109;
        cr -= 152;
        x1 = (819*cr - 614*cb)/32 + 51;
        y1 = (819*cr + 614*cb)/32 + 77;
        x1 = x1*41/1024;
        y1 = y1*73/1024;
        value = x1*x1 + y1*y1;
        if (y < 100) (*pMask) = (value < 700) ? 255 : 0;
        else         (*pMask) = (value < 850) ? 255 : 0;
        pY++;
        pCr++;
        pCb++;
        pMask++;
    }
    cvReleaseImage(&imgY);
    cvReleaseImage(&imgCr);
    cvReleaseImage(&imgCb);
    cvReleaseImage(&imgYCrCb);
}
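A minimal sketch of calling this function (the image path and window names are placeholders of my own; cvSkinSegment is the function defined above):

#include <opencv2/opencv.hpp>

int main()
{
    IplImage* img = cvLoadImage("test.jpg", CV_LOAD_IMAGE_COLOR);   // placeholder path
    if (!img) return -1;

    // single-channel mask with the same size as the input
    IplImage* mask = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvSkinSegment(img, mask);   // the function defined above

    cvShowImage("input", img);
    cvShowImage("skin mask", mask);
    cvWaitKey(0);

    cvReleaseImage(&mask);
    cvReleaseImage(&img);
    return 0;
}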
The main idea is to find an ellipse in the Cb-Cr plane that fits the distribution of typical skin tones, and then mark the pixels that fall inside this elliptical region as skin.
Figure 1.1: example of the elliptical template
The illustration above comes from the paper "An Elliptical-Model Skin Color Detection Method Based on the KL Transform"; the exact parameters came from a paper I read long ago and can no longer find.
Below are screenshots of the code in action.
Figure 1.2: result 1
Figure 1.3: result 2
As the two figures show, skin detection works quite well when the lighting is reasonably good (1.2 looks better than 1.3), but skin-like regions (such as the wooden door behind the person in Figure 1.3) are still falsely detected; that is a problem skin color detection alone cannot solve.

The noise-like speckles in the result images can be cleaned up by dilating, eroding, blurring and then thresholding again, which yields a smoother skin map (one that can be used as a mask):
cvErode(pSkin, pSkin, NULL, 1);
cvDilate(pSkin, pSkin, NULL, 1);
cvSmooth(pSkin, pSkin, CV_GAUSSIAN, 21, 0, 0);
cvThreshold(pSkin, pSkin, 130, 255, CV_THRESH_BINARY);
Of course, sometimes the result is still not particularly good; you have to tune the parameters yourself.

Overall, the result is better than OpenCV 2.0's adaptiveskindetector.cpp (in fact my improved code was modeled on the process function of the CvAdaptiveSkinDetector class in that file), although that may also be because my skin detection was tuned to the lighting conditions and camera characteristics of my own environment.
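For reference, the built-in detector mentioned here is used roughly as follows in OpenCV 2.x; this is a sketch from memory of the contrib-module API, so treat the header location, enum name and exact signatures as assumptions to verify against your OpenCV version:

#include <opencv2/opencv.hpp>
#include <opencv2/contrib/contrib.hpp>   // CvAdaptiveSkinDetector lives in contrib in OpenCV 2.x (assumed)

int main()
{
    IplImage* frame = cvLoadImage("test.jpg", CV_LOAD_IMAGE_COLOR);   // placeholder path
    if (!frame) return -1;

    IplImage* hueMask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);

    // sampling divider 1, with an erode/dilate post-processing step (assumed enum name)
    CvAdaptiveSkinDetector detector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
    detector.process(frame, hueMask);    // hueMask is non-zero where skin is detected

    cvShowImage("skin (adaptive)", hueMask);
    cvWaitKey(0);

    cvReleaseImage(&hueMask);
    cvReleaseImage(&frame);
    return 0;
}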

Finally, a few brief remarks about the code.

The code is actually very simple: it splits the Y, Cb and Cr channels and then walks over every pixel of the three channels with pointers.

The part you may need to change is the threshold test: if (y < 100) (*pMask) = (value < 700) ? 255 : 0; else (*pMask) = (value < 850) ? 255 : 0;
Because lighting and camera performance vary, these thresholds have to be tuned for your own camera to get the best result.
Also, for a lower-quality webcam, it is advisable to apply a small Gaussian blur to the input image first to remove noise, as sketched below.
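For instance, a light 3x3 Gaussian blur before calling cvSkinSegment; the kernel size here is my own choice, not from the original text:

cvSmooth(img, img, CV_GAUSSIAN, 3, 3, 0, 0);   // small blur to suppress sensor noise
cvSkinSegment(img, mask);                      // then run the skin segmentation above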
This algorithm was also implemented in MATLAB and on a Xilinx FPGA as one of my course projects; for details, see my defense slides, Skin Segmentation on FPGA.
