OpenCV-Based Video Face Detection Program Source Code (Runnable)

Notes on Implementing Face Detection with OpenCV

I recently used OpenCV to develop a small face detection program. Its structure is not complicated, but it should still be a useful guide for OpenCV beginners.

The following is a brief discussion of a few issues that came up during development.

1. Image acquisition

Images can be read either from a camera device or loaded from disk; the two approaches are largely similar.

Taking the camera as an example: as in MATLAB, grabbing frames in OpenCV comes down to a couple of simple API calls:

CvCapture* m_pCapture;
m_pCapture = cvCreateCameraCapture(0);
IplImage* m_pFrameImage;
m_pFrameImage = cvQueryFrame(m_pCapture);

First create a capture structure pointer m_pCapture, then call cvCreateCameraCapture(0) to associate the structure with a video input device. If there is only one device (for example a laptop's built-in camera), pass 0; if there are several devices, pass the ID of the one you want.

Once the device is associated, calling cvQueryFrame(m_pCapture) fetches the next frame of the video stream from m_pCapture and stores it in the memory the image pointer refers to. That completes camera image acquisition.
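The code above uses the legacy C API (CvCapture/IplImage). On newer OpenCV versions the same capture loop can be written with the C++ cv::VideoCapture class; the short sketch below is for reference only, and the device index 0 and the window name are assumptions:

#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cap(0);              // open the default camera (device index 0 is an assumption)
    if (!cap.isOpened()) return -1;
    cv::Mat frame;
    while (cap.read(frame)) {             // grab and decode the next frame
        if (frame.empty()) break;
        cv::imshow("camera", frame);      // hypothetical window name
        if (cv::waitKey(30) >= 0) break;  // any key stops the loop
    }
    return 0;
}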

Reading images from disk is a bit more involved and requires some knowledge of file and folder access in MFC.

Specifically, there are two routes: locate a folder and then read all image files under it, or locate a single file directly and read it.

The code for picking a folder is as follows (str is assumed to be a CString, and m_Path a CString member that receives the result):

BROWSEINFO bi;                          // holds the information about the directory the user selects
TCHAR name[MAX_PATH];                   // buffer for the display name
name[0] = 'd';
ZeroMemory(&bi, sizeof(BROWSEINFO));    // clear the structure
bi.hwndOwner = GetSafeHwnd();           // owner window handle
bi.pszDisplayName = name;
BIF_BROWSEINCLUDEFILES;                 // note: as a bare statement this has no effect; the flag would
                                        // have to be OR-ed into bi.ulFlags for files to be listed
bi.lpszTitle = _T("Select folder");     // dialog title
bi.ulFlags = 0x80;                      // dialog style flags
LPITEMIDLIST idl = SHBrowseForFolder(&bi);           // returns the item-ID list of the selected folder
if (idl == NULL)
    return;
SHGetPathFromIDList(idl, str.GetBuffer(MAX_PATH));   // convert the item-ID list into a path in the buffer
str.ReleaseBuffer();                    // paired with GetBuffer to release the buffer
m_Path = str;                           // store the path in m_Path
if (str.GetAt(str.GetLength() - 1) != '\\')
    m_Path += "\\";
UpdateData(FALSE);

The key function here is SHBrowseForFolder; its purpose and usage are well documented in existing posts and blogs, so they are not repeated here. The selected folder path ends up in the variable m_Path.
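For the second route mentioned above (locating a single file directly), a minimal MFC sketch using CFileDialog could look like the following; the filter string and the variable names are illustrative assumptions, not code from the original program:

// Sketch only: open a standard file dialog and read the chosen image with the legacy OpenCV C API.
CFileDialog dlg(TRUE, NULL, NULL, OFN_FILEMUSTEXIST | OFN_HIDEREADONLY,
                _T("Image Files (*.bmp;*.jpg;*.png)|*.bmp;*.jpg;*.png|All Files (*.*)|*.*||"));
if (dlg.DoModal() == IDOK)
{
    CString filePath = dlg.GetPathName();              // full path of the selected file
    IplImage* pImage = cvLoadImage(CT2A(filePath));    // load the image
    if (pImage != NULL)
    {
        // ... process the image here ...
        cvReleaseImage(&pImage);
    }
}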

Implementing Face Detection with OpenCV

This article shares example code for implementing face detection with OpenCV, for your reference. The details are as follows.

1. Haar cascade detection

#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdlib>

using namespace cv;
using namespace std;

void face_detect_haar();

int main(int argc, char** argv) {
    face_detect_haar();
    waitKey(0);
    return 0;
}

void face_detect_haar() {
    CascadeClassifier faceDetector;
    std::string haar_data_file = "./models/haarcascades/haarcascade_frontalface_alt_tree.xml";
    faceDetector.load(haar_data_file);
    vector<Rect> faces;
    // VideoCapture capture(0);
    VideoCapture capture("./video/test.mp4");
    Mat frame, gray;
    int count = 0;
    while (capture.read(frame)) {
        int64 start = getTickCount();
        if (frame.empty()) {
            break;
        }
        // mirror horizontally if needed
        // flip(frame, frame, 1);
        imshow("input", frame);
        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);
        faceDetector.detectMultiScale(gray, faces, 1.2, 1, 0, Size(30, 30), Size(400, 400));
        for (size_t t = 0; t < faces.size(); t++) {
            count++;
            rectangle(frame, faces[t], Scalar(0, 255, 0), 2, 8, 0);
        }
        float fps = getTickFrequency() / (getTickCount() - start);
        // elapsed time in ms (the original referenced an undefined "time" variable here)
        double time = (getTickCount() - start) / (getTickFrequency() / 1000.0);
        ostringstream ss;
        ss.str("");
        ss << "FPS: " << fps << " ; inference time: " << time << " ms";
        putText(frame, ss.str(), Point(20, 20), 0, 0.75, Scalar(0, 0, 255), 2, 8);
        imshow("haar_face_detection", frame);
        if (waitKey(1) >= 0) break;
    }
    printf("total face: %d\n", count);
}

2. DNN face detection

#include <opencv2/dnn.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdlib>

using namespace cv;
using namespace cv::dnn;
using namespace std;

const size_t inWidth = 300;
const size_t inHeight = 300;
const double inScaleFactor = 1.0;
const Scalar meanVal(104.0, 177.0, 123.0);
const float confidenceThreshold = 0.7;

void face_detect_dnn();

int main(int argc, char** argv) {
    face_detect_dnn();
    waitKey(0);
    return 0;
}

void face_detect_dnn() {
    // the TensorFlow face detector model shipped with OpenCV is used here
    std::string modelBinary = "./models/dnn/face_detector/opencv_face_detector_uint8.pb";
    std::string modelDesc = "./models/dnn/face_detector/opencv_face_detector.pbtxt";
    // initialize the network
    dnn::Net net = readNetFromTensorflow(modelBinary, modelDesc);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);
    if (net.empty()) {
        printf("Load models fail...\n");
        return;
    }
    // open the camera or a video file
    // VideoCapture capture(0);
    VideoCapture capture("./video/test.mp4");
    if (!capture.isOpened()) {
        printf("Don't find video...\n");
        return;
    }
    Mat frame;
    int count = 0;
    while (capture.read(frame)) {
        int64 start = getTickCount();
        if (frame.empty()) {
            break;
        }
        // mirror horizontally if needed
        // flip(frame, frame, 1);
        imshow("input", frame);
        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);
        // prepare the input blob
        Mat inputBlob = blobFromImage(frame, inScaleFactor, Size(inWidth, inHeight), meanVal, false, false);
        net.setInput(inputBlob, "data");
        // face detection
        Mat detection = net.forward("detection_out");
        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;
        double time = net.getPerfProfile(layersTimings) / freq;
        Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
        ostringstream ss;
        for (int i = 0; i < detectionMat.rows; i++) {
            // confidence in the range 0..1
            float confidence = detectionMat.at<float>(i, 2);
            if (confidence > confidenceThreshold) {
                count++;
                int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
                int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
                int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
                int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
                Rect object(xLeftBottom, yLeftBottom,
                            xRightTop - xLeftBottom,
                            yRightTop - yLeftBottom);
                rectangle(frame, object, Scalar(0, 255, 0));
                ss << confidence;
                std::string conf(ss.str());
                std::string label = "Face: " + conf;
                int baseLine = 0;
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
                          Size(labelSize.width, labelSize.height + baseLine)),
                          Scalar(255, 255, 255), FILLED);
                putText(frame, label, Point(xLeftBottom, yLeftBottom),
                        FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
            }
        }
        float fps = getTickFrequency() / (getTickCount() - start);
        ss.str("");
        ss << "FPS: " << fps << " ; inference time: " << time << " ms";
        putText(frame, ss.str(), Point(20, 20), 0, 0.75, Scalar(0, 0, 255), 2, 8);
        imshow("dnn_face_detection", frame);
        if (waitKey(1) >= 0) break;
    }
    printf("total face: %d\n", count);
}

That is the full content of this example; hopefully it is helpful for your study.

Facial Expression Recognition with Python and OpenCV

Contents: preface; 1. image preprocessing; 2. dataset split; 3. smile recognition; 4. smile/non-smile recognition with dlib facial features.

Preface

Environment setup is covered elsewhere (the original link is omitted here). The data looks as follows (screenshot omitted).

1. Image preprocessing

import dlib          # face detection library
import numpy as np   # numerical processing
import cv2           # image processing (OpenCV)
import os

# dlib face detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# directory of input images
path_read = ".\\ImageFiles\\files"
num = 0
for file_name in os.listdir(path_read):
    # aa is the full path of the image
    aa = (path_read + "/" + file_name)
    # the path may contain non-ASCII characters, so decode via np.fromfile
    img = cv2.imdecode(np.fromfile(aa, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    # image size
    img_shape = img.shape
    img_height = img_shape[0]
    img_width = img_shape[1]
    # directory where the cropped single faces are saved
    path_save = ".\\ImageFiles\\files1"
    # dlib detection
    dets = detector(img, 1)
    print("number of faces:", len(dets))
    for k, d in enumerate(dets):
        if len(dets) > 1:
            continue
        num = num + 1
        # rectangle corners: (x, y), (width, height)
        pos_start = tuple([d.left(), d.top()])
        pos_end = tuple([d.right(), d.bottom()])
        # rectangle size
        height = d.bottom() - d.top()
        width = d.right() - d.left()
        # empty image with the size of the face
        img_blank = np.zeros((height, width, 3), np.uint8)
        for i in range(height):
            if d.top() + i >= img_height:        # guard against running out of bounds
                continue
            for j in range(width):
                if d.left() + j >= img_width:    # guard against running out of bounds
                    continue
                img_blank[i][j] = img[d.top() + i][d.left() + j]
        img_blank = cv2.resize(img_blank, (200, 200), interpolation=cv2.INTER_CUBIC)
        # imencode + tofile writes correctly even if the path contains non-ASCII characters
        cv2.imencode('.jpg', img_blank)[1].tofile(path_save + "\\" + "file" + str(num) + ".jpg")

Result: (screenshot omitted)

2. Dataset split

import os, shutil

# original dataset path
original_dataset_dir = '.\\ImageFiles\\files1'
# new dataset root
base_dir = '.\\ImageFiles\\files2'
os.mkdir(base_dir)

# directories for training, validation and test images
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# the *_cats_* / *_dogs_* names are left over from the cats-vs-dogs tutorial;
# here 'cats' holds the smile class and 'dogs' the unsmile class
train_cats_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_dogs_dir)
validation_cats_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_cats_dir)
validation_dogs_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_dogs_dir)

# copy smile images into the training directory
fnames = ['file{}.jpg'.format(i) for i in range(1, 900)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)
# copy smile images into the validation directory
fnames = ['file{}.jpg'.format(i) for i in range(900, 1350)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)
# copy smile images into the test directory
fnames = ['file{}.jpg'.format(i) for i in range(1350, 1800)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)
# copy unsmile images into the training directory
fnames = ['file{}.jpg'.format(i) for i in range(2127, 3000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)
# copy unsmile images into the validation directory
fnames = ['file{}.jpg'.format(i) for i in range(3000, 3304)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dst)
# # copy unsmile images into the test directory
# fnames = ['file{}.jpg'.format(i) for i in range(3000, 3878)]
# for fname in fnames:
#     src = os.path.join(original_dataset_dir, fname)
#     dst = os.path.join(test_dogs_dir, fname)
#     shutil.copyfile(src, dst)

Result: (screenshot omitted)

3. Smile recognition

Build the model:

# create the model
from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()   # inspect the architecture

# compile the model (the original text lost the "model.compile" prefix here)
from keras import optimizers
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# normalization
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,                # target directory
    target_size=(150, 150),   # resize every image to 150x150
    batch_size=20,
    class_mode='binary')      # binary labels, matching binary_crossentropy
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch)
    break
# 'smile': 0, 'unsmile': 1

Augment the data:

# data augmentation
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# visualize how augmentation changes an image
import matplotlib.pyplot as plt
from keras.preprocessing import image

train_smile_dir = './ImageFiles//files2//train//smile/'
fnames = [os.path.join(train_smile_dir, fname) for fname in os.listdir(train_smile_dir)]
img_path = fnames[3]
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.figure(i)
    imgplot = plt.imshow(image.array_to_img(batch[0]))
    i += 1
    if i % 4 == 0:
        break
plt.show()

Build and train the network:

# create the network (this time with dropout)
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# normalization plus augmentation for the training set
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,                # target directory
    target_size=(150, 150),   # all images resized to 150x150
    batch_size=32,
    class_mode='binary')      # binary labels, matching binary_crossentropy
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=60,
    validation_data=validation_generator,
    validation_steps=50)
model.save('smileAndUnsmile1.h5')

# accuracy and loss curves on the augmented training and validation sets
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

Test on a single image:

# decide whether a single image shows a smiling or a non-smiling face
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np

# load the trained model
model = load_model('smileAndUnsmile1.h5')
# path of a local image
img_path = 'test.jpg'
img = image.load_img(img_path, target_size=(150, 150))
img_tensor = image.img_to_array(img) / 255.0
img_tensor = np.expand_dims(img_tensor, axis=0)
prediction = model.predict(img_tensor)
print(prediction)
if prediction[0][0] > 0.5:
    result = 'non-smile'
else:
    result = 'smile'
print(result)

Test with the camera:

# detect faces in a video or camera stream and classify each one
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('smileAndUnsmile1.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    if dets is not None:
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            if prediction[0][0] > 0.5:
                result = 'unsmile'
            else:
                result = 'smile'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow('Video', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()

Result: (screenshot omitted)

4. Smile/non-smile recognition with dlib facial features

import cv2    # OpenCV, for image processing
import dlib   # dlib, for face detection and landmarks
import numpy as np

class face_emotion():
    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        self.cap = cv2.VideoCapture(0)
        self.cap.set(3, 480)
        self.t = 0    # screenshot counter (the original used a plain local variable here, which would fail)

    def learning_face(self):
        line_brow_x = []
        line_brow_y = []
        while self.cap.isOpened():
            flag, im_rd = self.cap.read()
            k = cv2.waitKey(1)
            # convert to grayscale
            img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)
            faces = self.detector(img_gray, 0)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # if at least one face was detected
            if len(faces) != 0:
                # mark the 68 landmarks of every face
                for i in range(len(faces)):
                    for k, d in enumerate(faces):
                        cv2.rectangle(im_rd, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255))
                        self.face_width = d.right() - d.left()
                        shape = self.predictor(im_rd, d)
                        mouth_width = (shape.part(54).x - shape.part(48).x) / self.face_width
                        mouth_height = (shape.part(66).y - shape.part(62).y) / self.face_width
                        brow_sum = 0
                        frown_sum = 0
                        for j in range(17, 21):
                            brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
                            frown_sum += shape.part(j + 5).x - shape.part(j).x
                            line_brow_x.append(shape.part(j).x)
                            line_brow_y.append(shape.part(j).y)
                        tempx = np.array(line_brow_x)
                        tempy = np.array(line_brow_y)
                        z1 = np.polyfit(tempx, tempy, 1)
                        self.brow_k = -round(z1[0], 3)
                        brow_height = (brow_sum / 10) / self.face_width   # relative eyebrow height
                        brow_width = (frown_sum / 5) / self.face_width    # relative eyebrow spacing
                        eye_sum = (shape.part(41).y - shape.part(37).y + shape.part(40).y - shape.part(38).y +
                                   shape.part(47).y - shape.part(43).y + shape.part(46).y - shape.part(44).y)
                        eye_hight = (eye_sum / 4) / self.face_width
                        if mouth_height >= 0.03 and eye_hight < 0.56:
                            cv2.putText(im_rd, "smile", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2, 4)
                        if mouth_height < 0.03 and self.brow_k > -0.3:
                            cv2.putText(im_rd, "unsmile", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2, 4)
                cv2.putText(im_rd, "Face-" + str(len(faces)), (20, 50), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
            else:
                cv2.putText(im_rd, "No Face", (20, 50), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
            im_rd = cv2.putText(im_rd, "S: screenshot", (20, 450), font, 0.6, (255, 0, 255), 1, cv2.LINE_AA)
            im_rd = cv2.putText(im_rd, "Q: quit", (20, 470), font, 0.6, (255, 0, 255), 1, cv2.LINE_AA)
            if (cv2.waitKey(1) & 0xFF) == ord('s'):
                self.t += 1
                cv2.imwrite("screenshoot" + str(self.t) + ".jpg", im_rd)
            # press q to quit
            if cv2.waitKey(1) == ord('q'):
                break
            # show the window
            cv2.imshow("Face Recognition", im_rd)
        self.cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    my_face = face_emotion()
    my_face.learning_face()

Result: (screenshot omitted)

That is the detailed walkthrough of facial expression recognition with Python and OpenCV.

Face Recognition in Python Using OpenCV

Environment:
Ubuntu 12.04 LTS
Python 2.7.3
OpenCV 2.3.1-7

Install the dependencies:

sudo apt-get install libopencv-*
sudo apt-get install python-opencv
sudo apt-get install python-numpy

Sample code (Python 2, legacy cv module):

#!/usr/bin/env python
# coding=utf-8
import os
from PIL import Image, ImageDraw
import cv

def detect_object(image):
    '''Detect faces and return their coordinates in the image'''
    grayscale = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    cascade = cv.Load("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml")
    rect = cv.HaarDetectObjects(grayscale, cascade, cv.CreateMemStorage(), 1.1, 2,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (20, 20))
    result = []
    for r in rect:
        result.append((r[0][0], r[0][1], r[0][0] + r[0][2], r[0][1] + r[0][3]))
    return result

def process(infile):
    '''Draw a box around every face in the original image and save each face crop to its own folder'''
    image = cv.LoadImage(infile)
    if image:
        faces = detect_object(image)
    im = Image.open(infile)
    path = os.path.abspath(infile)
    save_path = os.path.splitext(path)[0] + "_face"
    try:
        os.mkdir(save_path)
    except:
        pass
    if faces:
        draw = ImageDraw.Draw(im)
        count = 0
        for f in faces:
            count += 1
            draw.rectangle(f, outline=(255, 0, 0))
            a = im.crop(f)
            file_name = os.path.join(save_path, str(count) + ".jpg")
            # print file_name
            a.save(file_name)
        drow_save_path = os.path.join(save_path, "out.jpg")
        im.save(drow_save_path, "JPEG", quality=80)
    else:
        print "Error: cannot detect faces on %s" % infile

if __name__ == "__main__":
    process("./opencv_in.jpg")

Result: original image and processed image (screenshots omitted).

Impressions: for most images, as long as the face is frontal and not occluded, detection basically just works and the accuracy is quite high.

OpenCV Face Recognition C++ Example Code

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>

using namespace cv;
using namespace std;

void detectAndDraw(Mat& img, CascadeClassifier& cascade,
                   CascadeClassifier& nestedCascade,
                   double scale, bool tryflip);

int main()
{
    //VideoCapture cap(0); // open the default camera
    //if (!cap.isOpened())
    //{
    //    return -1;
    //}
    Mat frame;
    Mat edges;
    CascadeClassifier cascade, nestedCascade;
    bool stop = false;
    // trained cascade files; place them next to the executable or use absolute paths as below
    cascade.load("D:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
    nestedCascade.load("D:\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
    frame = imread("E:\\tmpimg\\hezhao.jpg");
    detectAndDraw(frame, cascade, nestedCascade, 2, 0);
    waitKey();
    //while (!stop)
    //{
    //    cap >> frame;
    //    detectAndDraw(frame, cascade, nestedCascade, 2, 0);
    //    if (waitKey(30) >= 0)
    //        stop = true;
    //}
    return 0;
}

void detectAndDraw(Mat& img, CascadeClassifier& cascade,
                   CascadeClassifier& nestedCascade,
                   double scale, bool tryflip)
{
    int i = 0;
    double t = 0;
    // vector containers for the detected faces
    vector<Rect> faces, faces2;
    // a few colors used to mark different faces
    const static Scalar colors[] = {
        CV_RGB(0, 0, 255), CV_RGB(0, 128, 255), CV_RGB(0, 255, 255), CV_RGB(0, 255, 0),
        CV_RGB(255, 128, 0), CV_RGB(255, 255, 0), CV_RGB(255, 0, 0), CV_RGB(255, 0, 255)
    };
    // build a shrunken image to speed up detection
    // cvRound(double value) rounds a double to the nearest integer
    Mat gray, smallImg(cvRound(img.rows / scale), cvRound(img.cols / scale), CV_8UC1);
    // convert to grayscale; Haar features work on grayscale images
    cvtColor(img, gray, CV_BGR2GRAY);
    imshow("gray", gray);
    // resize using bilinear interpolation
    resize(gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR);
    imshow("resized", smallImg);
    // histogram equalization of the resized image
    equalizeHist(smallImg, smallImg);
    imshow("equalized", smallImg);
    // take the tick count before and after detection to measure the runtime
    t = (double)cvGetTickCount();
    // detect faces
    // smallImg is the input image and faces receives the detected face rectangles;
    // 1.1 is the scale step between pyramid levels; 2 is minNeighbors, the number of
    // neighboring detections a candidate needs before it is accepted (nearby positions and
    // different window sizes can all report the same face); CV_HAAR_SCALE_IMAGE means the
    // image is scaled rather than the classifier; Size(30, 30) is the minimum object size
    cascade.detectMultiScale(smallImg, faces,
        1.1, 2, 0
        //|CV_HAAR_FIND_BIGGEST_OBJECT
        //|CV_HAAR_DO_ROUGH_SEARCH
        | CV_HAAR_SCALE_IMAGE,
        Size(30, 30));
    // if enabled, flip the image and detect again
    if (tryflip)
    {
        flip(smallImg, smallImg, 1);
        imshow("flipped", smallImg);
        cascade.detectMultiScale(smallImg, faces2,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            | CV_HAAR_SCALE_IMAGE,
            Size(30, 30));
        for (vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++)
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)cvGetTickCount() - t;
    // qDebug("detection time = %g ms\n", t / ((double)cvGetTickFrequency() * 1000.));
    for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++)
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i % 8];
        int radius;
        double aspect_ratio = (double)r->width / r->height;
        if (0.75 < aspect_ratio && aspect_ratio < 1.3)
        {
            // faces are drawn on the original, unscaled image, so the coordinates
            // are mapped back using the scale factor
            center.x = cvRound((r->x + r->width * 0.5) * scale);
            center.y = cvRound((r->y + r->height * 0.5) * scale);
            radius = cvRound((r->width + r->height) * 0.25 * scale);
            circle(img, center, radius, color, 3, 8, 0);
        }
        else
            rectangle(img, cvPoint(cvRound(r->x * scale), cvRound(r->y * scale)),
                      cvPoint(cvRound((r->x + r->width - 1) * scale), cvRound((r->y + r->height - 1) * scale)),
                      color, 3, 8, 0);
        if (nestedCascade.empty())
            continue;
        smallImgROI = smallImg(*r);
        // detect the eyes inside the face region in the same way
        nestedCascade.detectMultiScale(smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            //|CV_HAAR_DO_CANNY_PRUNING
            | CV_HAAR_SCALE_IMAGE,
            Size(30, 30));
        for (vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++)
        {
            center.x = cvRound((r->x + nr->x + nr->width * 0.5) * scale);
            center.y = cvRound((r->y + nr->y + nr->height * 0.5) * scale);
            radius = cvRound((nr->width + nr->height) * 0.25 * scale);
            circle(img, center, radius, color, 3, 8, 0);
        }
    }
    imshow("result", img);
}

OpenCV linker configuration:

[debug]
opencv_ml2413d.lib
opencv_calib3d2413d.lib
opencv_contrib2413d.lib
opencv_core2413d.lib
opencv_features2d2413d.lib
opencv_flann2413d.lib
opencv_gpu2413d.lib
opencv_highgui2413d.lib
opencv_imgproc2413d.lib
opencv_legacy2413d.lib
opencv_objdetect2413d.lib
opencv_ts2413d.lib
opencv_video2413d.lib
opencv_nonfree2413d.lib
opencv_ocl2413d.lib
opencv_photo2413d.lib
opencv_stitching2413d.lib
opencv_superres2413d.lib
opencv_videostab2413d.lib

[release]
opencv_ml2413.lib
opencv_calib3d2413.lib
opencv_contrib2413.lib
opencv_core2413.lib
opencv_features2d2413.lib
opencv_flann2413.lib
opencv_gpu2413.lib
opencv_highgui2413.lib
opencv_imgproc2413.lib
opencv_legacy2413.lib
opencv_objdetect2413.lib
opencv_ts2413.lib
opencv_video2413.lib
opencv_nonfree2413.lib
opencv_ocl2413.lib
opencv_photo2413.lib
opencv_stitching2413.lib
opencv_superres2413.lib
opencv_videostab2413.lib

// batch-replace "2413" with your own OpenCV version number.

Face Recognition in Python Based on OpenCV

Download OpenCV's haarcascade_frontalface_default.xml file to the local machine; we call it to assist with face detection.

Detect faces in an image:

# coding:utf-8
import cv2 as cv

# read the source image
img = cv.imread('face.png')
# load a pre-trained classifier for the feature type to detect
# faces  - haarcascade_frontalface_default.xml
# eyes   - haarcascade_eye.xml
# smiles - haarcascade_smile.xml
face_detect = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# detect faces: scale the image in steps of 1.1 and require at least 5 neighbors
face_zone = face_detect.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print('detected face regions:', face_zone)
# draw a rectangle and a circle around every detected face
for x, y, w, h in face_zone:
    # rectangle around the face region; thickness is the line width
    cv.rectangle(img, pt1=(x, y), pt2=(x + w, y + h), color=[0, 0, 255], thickness=2)
    # circle around the face region; radius is half the width
    cv.circle(img, center=(x + w // 2, y + h // 2), radius=w // 2, color=[0, 255, 0], thickness=2)
# make the window manually resizable (the original text lost the "cv.name" prefix of namedWindow)
cv.namedWindow("Easmount-CSDN", 0)
# show the image
cv.imshow("Easmount-CSDN", img)
# wait for any key press, then exit
cv.waitKey(0)
cv.destroyAllWindows()

Note that this cascade only detects frontal faces, and no algorithm is 100% accurate.

Research and Implementation of OpenCV-Based Face Recognition (C)

1. Introduction: Face recognition has drawn a great deal of attention in recent years. With the development of computer vision and artificial intelligence, it is now widely applied across many fields.

This article focuses on the research and implementation of face recognition based on the OpenCV library, programmed in C.

2. About OpenCV: OpenCV is an open-source computer vision library that provides a rich set of image processing and computer vision algorithms.

It supports multiple programming languages, including C++ and Python.

In this article we use C together with the OpenCV library to implement face recognition.

3. Face detection: Face detection is the first step of face recognition; it locates the faces in the input image for the later recognition stages.

OpenCV provides Haar cascade classifiers for face detection, which give a simple yet effective detector, as sketched below.
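Although the article targets C, current OpenCV versions expose the cascade detector through the C++ API; the minimal sketch below is illustrative only, and the cascade path and image file name are assumptions:

#include <opencv2/opencv.hpp>
#include <vector>

int main() {
    // Path to a cascade shipped with OpenCV; adjust to your installation (assumption).
    cv::CascadeClassifier face_cascade("haarcascade_frontalface_default.xml");
    cv::Mat img = cv::imread("test.jpg");          // hypothetical input image
    if (face_cascade.empty() || img.empty()) return -1;

    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);                  // improve contrast before detection

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));
    for (const cv::Rect& r : faces)
        cv::rectangle(img, r, cv::Scalar(0, 255, 0), 2);

    cv::imwrite("result.jpg", img);                // save the annotated image
    return 0;
}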

4. Face feature extraction: Before recognition, features must be extracted from each detected face.

Common methods include principal component analysis (PCA) and linear discriminant analysis (LDA).

Extracting a feature vector turns each face into a vector representation that is convenient to compare and match in the later stages; a small example follows.
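As one possible illustration (not the article's concrete implementation), the sketch below uses OpenCV's cv::PCA class to learn an eigenfaces-style projection from training faces and to produce a feature vector for one sample; the matrix layout and the number of components are assumptions:

#include <opencv2/opencv.hpp>

// data: CV_32F matrix with one flattened, equally sized face image per row (assumed prepared elsewhere)
cv::Mat extractPcaFeature(const cv::Mat& data, const cv::Mat& sample, int numComponents = 50) {
    // Learn the PCA subspace from the training faces.
    cv::PCA pca(data, cv::Mat(), cv::PCA::DATA_AS_ROW, numComponents);
    // Project a single flattened face into the subspace; the result is its feature vector.
    cv::Mat feature = pca.project(sample.reshape(1, 1));
    return feature;
}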

5. Face recognition algorithms: Once the face feature vectors are available, different algorithms can be used for recognition.

Common choices include k-nearest neighbors (KNN), support vector machines (SVM), and deep learning methods.

A suitable model can be selected, trained, and tested according to the actual requirements; the SVM sketch below shows one option.
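For example, OpenCV's machine-learning module offers an SVM that can be trained directly on such feature vectors. In the sketch below, features (CV_32F, one vector per row), labels (CV_32S person IDs) and the linear kernel are assumptions, not choices made by the article:

#include <opencv2/opencv.hpp>
#include <opencv2/ml.hpp>

// Train a linear SVM on face feature vectors and predict the person ID of a new face.
int classifyFace(const cv::Mat& features, const cv::Mat& labels, const cv::Mat& query) {
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::LINEAR);
    svm->train(features, cv::ml::ROW_SAMPLE, labels);    // labels: one integer ID per row
    return static_cast<int>(svm->predict(query));        // predicted person ID
}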

6. Experiment design and implementation: This section describes how to set up a face recognition experiment with the OpenCV library and C.

First, prepare the training and test datasets, then preprocess them and extract features.

Next, choose a suitable algorithm, train the model, and validate and evaluate it on the test set.

7. Results and analysis: The experiments yield the performance of the different algorithms on the face recognition task, which can be compared in terms of accuracy, recall, and other metrics.

Analyzing these results helps choose the algorithm best suited to the task at hand and guides further optimization and improvement; the small sketch below shows how the two metrics are computed.
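For reference, accuracy and recall can be computed from confusion counts as in this tiny sketch; the counts themselves are made-up illustration values:

#include <cstdio>

int main() {
    // Hypothetical confusion counts from a test run (assumed, for illustration only).
    int tp = 90, fp = 5, fn = 10, tn = 95;
    double accuracy = static_cast<double>(tp + tn) / (tp + tn + fp + fn); // correct / all samples
    double recall   = static_cast<double>(tp) / (tp + fn);                // found / all positives
    std::printf("accuracy = %.3f, recall = %.3f\n", accuracy, recall);    // prints 0.925, 0.900
    return 0;
}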

8. Applications and outlook: Face recognition has broad application prospects in security surveillance, human-computer interaction, identity authentication, and other areas.

As the technology continues to develop, face recognition will become smarter and more convenient, bringing additional convenience to everyday life.

Code Analysis of OpenCV Haar Face Detection

The CvHaarClassifierCascade structure represents a cascade (or tree) of boosted Haar stage classifiers:

int flags;                                  /* signature */
int count;                                  /* number of stages */
CvSize orig_window_size;                    /* original object size the cascade was trained for */
/* these two parameters are set by cvSetImagesForHaarClassifierCascade */
CvSize real_window_size;                    /* current object size */
double scale;                               /* current scale */
CvHaarStageClassifier* stage_classifier;    /* array of stage classifiers */
CvHidHaarClassifierCascade* hid_cascade;    /* hidden optimized representation of the cascade,
                                               created by cvSetImagesForHaarClassifierCascade */

The cascade has the following hierarchy:

Cascade:
    Stage1:
        Classifier11:
            Feature11
        Classifier12:
            Feature12
        ...
    Stage2:
        Classifier21:
            Feature21
        ...
    ...

The whole hierarchy can be built by hand, or loaded with cvLoadHaarClassifierCascade from an existing file on disk or from the classifier database embedded in OpenCV.

Functions used for detection:

cvLoadHaarClassifierCascade loads a trained cascade classifier from a file, or imports it from the classifier database embedded in OpenCV:

CvHaarClassifierCascade* cvLoadHaarClassifierCascade(const char* directory,
                                                     CvSize orig_window_size);

directory: path of the trained cascade classifier.
orig_window_size: size of the detection target that the cascade was trained on.

Steps to build the runnable video face detection program (Visual Studio 2008):

1. Open Microsoft Visual Studio 2008 and create a new Win32 console project.
2. Configure the project's include and library directories.
3. Copy haarcascade_frontalface_alt.xml from ……\OpenCV\data\haarcascades into the project folder.
4. Then add the code:
#include"stdafx.h"
#include"cv.h"
#include"highgui.h"
#include<stdio.h>
int _tmain(int argc, _TCHAR* argv[])
{
CvCapture* capture=0;
/* Initialize a video capture operation.
   Tell the underlying capture API which file we want to grab frames from;
   it will detect and select the appropriate decoder and get ready for capture. */
capture = cvCaptureFromFile( "F:\\1.avi" ); // the video file to read (AVI format)
static CvMemStorage* storage = 0;
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad("haarcascade_frontalface_alt.xml",0,0,0);
if( !cascade || !capture )
return -1;
storage = cvCreateMemStorage(0);
/* Create a window with "Video" as its identifier */
cvNamedWindow( "Video",1);
/* If initialization failed, capture is a null pointer and the program stops; otherwise enter the capture loop */
if( capture )
{
for(;;)
{
IplImage* frame = cvQueryFrame( capture );
IplImage* img = NULL;
CvSeq* faces;
if( !frame )
break;
img = cvCloneImage(frame);
img->origin = 0;
if( frame->origin )
cvFlip(img,img);
cvClearMemStorage( storage );
// object detection
faces = cvHaarDetectObjects( img, cascade, storage,1.1, 2,
CV_HAAR_DO_CANNY_PRUNING, cvSize(20, 20) );
for( int i = 0; i < (faces ? faces->total : 0); i++ )
{
CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
cvRectangle( img, cvPoint(r->x,r->y),
cvPoint(r->x+r->width,r->y+r->height), CV_RGB(255,0,0), 1);
}
cvShowImage( "Video", img );
// delay between frames
Sleep(50);
/* Exit the program if a key was pressed; otherwise keep capturing the next frame */
if( cvWaitKey(10)>0 )
break;
}
/* Stop the underlying capture before exiting; otherwise other programs may be unable to access the file it has opened */
cvReleaseCapture( &capture );
}
/* Destroy the window */
cvDestroyWindow("Video");
return 0;
}
