Configuring OpenCV on iOS and doing face detection

As someone with an overactive sense of curiosity, I want to dig to the bottom of anything I don't yet understand.

So I put together a face detection demo.

Right now there are very few Chinese articles on OpenCV, and most of them copy from each other; worse, they copy only a fragment rather than the whole thing, and they are old enough that they no longer match the current tools.

No matter. I'm a practical person, so let's get straight to it. I consulted quite a few Chinese OpenCV articles along the way; the code is based on http://m.blog.csdn.net/blog/u013810454/27868973, which you can look at yourself, although the project it offers for download has problems.

This post pulls the best parts of those sources together into something more complete, covering everything from configuration to actual use.

First, let's configure OpenCV in the Xcode project.

1. Download the iOS framework from the OpenCV website: grab opencv2.framework.

Then drag it straight into the project you created earlier.


That's the basic configuration done, so it's time for the actual technique. Just don't forget to rename the .m file to .mm so it compiles as Objective-C++.
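Before touching the code, it also helps to make sure the OpenCV headers are seen before any Apple headers, otherwise the compiler trips over conflicting macros. A minimal sketch of the prefix header (the .pch file name depends on your project; this is the approach from the official OpenCV iOS tutorial):

// ProjectName-Prefix.pch
#ifdef __cplusplus
    #import <opencv2/opencv.hpp>   // OpenCV first, before any Apple headers
#endif

#ifdef __OBJC__
    #import <UIKit/UIKit.h>
    #import <Foundation/Foundation.h>
#endif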

#import "ViewController.h"
#import <Foundation/Foundation.h>


int currentvalue = 9;

@interface ViewController ()<UIImagePickerControllerDelegate,UINavigationControllerDelegate>

{
    // image view used to display the selected picture
    UIImageView *_imageView;
    // the image picked from the photo library
    UIImage *image;
}
@end

@implementation ViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view.
    [self createButton];
    // create a UIImagePickerController
    UIImagePickerController *ctrl = [[UIImagePickerController alloc] init];
    // pick from the photo library
    ctrl.sourceType = UIImagePickerControllerSourceTypePhotoLibrary;
    // set ourselves as the delegate
    ctrl.delegate = self;

    // present the picker (fine for this demo; a real app would normally present from viewDidAppear:)
    [self presentViewController:ctrl animated:YES completion:nil];
    
    self.view.backgroundColor = [UIColor whiteColor];
    
    
    // create a UIImageView to display the selected picture
    _imageView = [[UIImageView alloc] initWithFrame:CGRectMake(50, 100, 300, 400)];
    [self.view addSubview:_imageView];
    
}



#pragma mark - UIImagePickerController delegate
-(void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    // grab the image the user picked
    image = info[UIImagePickerControllerOriginalImage];

    UIImageOrientation imageOrientation=image.imageOrientation;
    if(imageOrientation!=UIImageOrientationUp)
    {
        // The photo stores the camera orientation in metadata, so the raw pixels can come back rotated (often 90 degrees).
        // Redrawing the image into a plain context bakes the correct orientation into the pixel data.
        UIGraphicsBeginImageContext(image.size);
        [image drawInRect:CGRectMake(0, 0, image.size.width, image.size.height)];
        image = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        // orientation adjustment done
    }
    // show the picked image
    _imageView.image = image;
    
    [picker dismissViewControllerAnimated:YES completion:nil];
    
    
}

-(void)imagePickerControllerDidCancel:(UIImagePickerController *)picker
{
    [picker dismissViewControllerAnimated:YES completion:nil];
}

// convert a UIImage into OpenCV's IplImage format (the caller must release the result with cvReleaseImage)
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;
    
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height), IPL_DEPTH_8U, 4);
    CGContextRef contextRef = CGBitmapContextCreate(iplimage->imageData, iplimage->width, iplimage->height,
                                                    iplimage->depth, iplimage->widthStep,
                                                    colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);
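    // at this point iplimage holds the RGBA pixels rendered by Core Graphics;
    // convert them to the 3-channel BGR layout used by the rest of the OpenCV code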
    
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);
    
    return ret;
}
- (void)opencvFaceDetect {

    // UIImage is immutable, so the picked image can be used directly
    UIImage *img = image;
    if(img) {

        cvSetErrMode(CV_ErrModeParent);
        IplImage *srcImage = [self CreateIplImageFromUIImage:img];

        // convert to a single-channel grayscale image first
        IplImage *grayImg = cvCreateImage(cvGetSize(srcImage), IPL_DEPTH_8U, 1);
        cvCvtColor(srcImage, grayImg, CV_BGR2GRAY);

        // shrink the input by a factor of 4 to speed up detection
        int scale = 4;
        IplImage *small_image = cvCreateImage(cvSize(srcImage->width/scale, srcImage->height/scale), IPL_DEPTH_8U, 1);
        cvResize(grayImg, small_image);

        // load the Haar cascade; haarcascade_frontalface_alt2.xml ships with OpenCV
        // (data/haarcascades) and must be added to the app bundle
        NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_alt2" ofType:@"xml"];
        CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
        CvMemStorage* storage = cvCreateMemStorage(0);
        cvClearMemStorage(storage);
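        // Arguments to cvHaarDetectObjects below: 1.1 is the scale factor (the search window
        // grows 10% per pass), currentvalue is min_neighbors (how many overlapping hits a
        // candidate needs before it counts as a face), CV_HAAR_DO_CANNY_PRUNING uses an edge
        // detector to skip regions unlikely to contain faces, and the two cvSize(0,0) arguments
        // leave the minimum and maximum object size at their defaults.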
        
        // the key step: cvHaarDetectObjects returns a sequence of face bounding boxes
        CvSeq* faces = cvHaarDetectObjects(small_image, cascade, storage, 1.1, currentvalue, CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0), cvSize(0,0));
        
        NSLog(@"faces:%d",faces->total);

        
        // create a bitmap context from the original image so the faces can be marked on it
        CGImageRef imageRef = img.CGImage;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef contextRef = CGBitmapContextCreate(NULL, img.size.width, img.size.height,8, img.size.width * 4,colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
        
        CGContextDrawImage(contextRef, CGRectMake(0, 0, img.size.width, img.size.height), imageRef);
        
        CGContextSetLineWidth(contextRef, 4);
        CGContextSetRGBStrokeColor(contextRef, 1.0, 0.0, 0.0, 1);
        
        // draw a rectangle around each detected face
        for(int i = 0; i < faces->total; i++) {
            // scale the detected rect back up to the original image size
            CvRect cvrect = *(CvRect*)cvGetSeqElem(faces, i);
            CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x*scale, cvrect.y*scale, cvrect.width*scale, cvrect.height*scale));
            CGContextStrokeRect(contextRef, face_rect);
        }

        // build the annotated image and update the UIImageView on the main thread
        CGImageRef resultRef = CGBitmapContextCreateImage(contextRef);
        UIImage *result = [UIImage imageWithCGImage:resultRef];
        CGImageRelease(resultRef);
        dispatch_async(dispatch_get_main_queue(), ^{
            _imageView.image = result;
        });

        // release the Core Graphics and OpenCV resources created above
        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);
        cvReleaseImage(&srcImage);
        cvReleaseImage(&grayImg);
        cvReleaseImage(&small_image);
        cvReleaseMemStorage(&storage);
        cvReleaseHaarClassifierCascade(&cascade);
    }
}


// detection takes a moment, so hand it off to a background thread (a GCD variant is sketched after the listing)
-(void)btn
{
    [NSThread detachNewThreadSelector:@selector(opencvFaceDetect) toTarget:self withObject:nil];
}



-(void)createButton
{
    
    
    // a small red square button that kicks off face detection when tapped
    UIButton *btn = [[UIButton alloc] init];
    btn.backgroundColor = [UIColor redColor];
    btn.frame = CGRectMake(0, 100, 30, 30);
    [btn addTarget:self action:@selector(btn) forControlEvents:UIControlEventTouchUpInside];
    [self.view addSubview:btn];
    
}
@end

OK, we can now detect faces in the picked photo.
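If you prefer GCD to NSThread, a minimal sketch of the same button handler looks like this (it assumes opencvFaceDetect dispatches its UIImageView update back to the main queue, as in the listing above):

-(void)btn
{
    // run detection on a background queue; the UI update happens on the main queue inside opencvFaceDetect
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [self opencvFaceDetect];
    });
}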


Pretty neat, and quite fun, isn't it? Go ahead and try it yourself.

Original post: https://www.cnblogs.com/lxjshuju/p/7029087.html