Notes on OpenCV for iOS

Linking OpenCV into an iOS project
  1. First, of course, download the OpenCV framework.
  2. Create a project and drag the OpenCV framework into it.
  3. Change the extension of every source file that uses OpenCV to .mm (Objective-C++).
  4. Manually add the framework dependencies that OpenCV requires.
  5. Import the OpenCV header before UIKit and Foundation (see the sketch after this list).
  6. Then you can start writing code.
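For step 5, a common pattern is to put the OpenCV import in the project's prefix header (or at the top of each .mm file), guarded for C++ and placed before the Apple frameworks so that their macro definitions do not clash. A minimal sketch, assuming opencv2.framework has already been added to the project:

#ifdef __cplusplus
#import <opencv2/opencv.hpp>   // must come before UIKit/Foundation to avoid macro conflicts
#endif
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>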
In OpenCV, image processing is normally done on the cv::Mat structure. On iOS, however, an image must be an instance of UIImage in order to be rendered on screen. To convert an OpenCV Mat to a UIImage (and back) we use the Core Graphics framework. The code below converts between Mat and UIImage in both directions.
 
// Convert a UIImage to a cv::Mat
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
  CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
  CGFloat cols = image.size.width;
  CGFloat rows = image.size.height;
  cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
  CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,                 // Pointer to  data
                                                 cols,                       // Width of bitmap
                                                 rows,                       // Height of bitmap
                                                 8,                          // Bits per component
                                                 cvMat.step[0],              // Bytes per row
                                                 colorSpace,                 // Colorspace
                                                 kCGImageAlphaNoneSkipLast |
                                                 kCGBitmapByteOrderDefault); // Bitmap info flags
  CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
  CGContextRelease(contextRef);
  return cvMat;
}

  

// Convert a cv::Mat back to a UIImage (handles both grayscale and color Mats, based on elemSize)
-(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(cvMat.cols,                                 //width
                                        cvMat.rows,                                 //height
                                        8,                                          //bits per component
                                        8 * cvMat.elemSize(),                       //bits per pixel
                                        cvMat.step[0],                            //bytesPerRow
                                        colorSpace,                                 //colorspace
                                        kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
                                        provider,                                   //CGDataProviderRef
                                        NULL,                                       //decode
                                        false,                                      //should interpolate
                                        kCGRenderingIntentDefault                   //intent
                                        );
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return finalImage;
}

  

// The UIImageFromCVMat: method above handles both grayscale and color Mats (decided by the channel count of the Mat passed in).
// Converting a color Mat (RGBA, as produced by cvMatFromUIImage:) to grayscale:
cv::Mat greyMat;
cv::cvtColor(inputMat, greyMat, cv::COLOR_RGBA2GRAY);
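As a hedged illustration of how the two helpers fit together, the hypothetical wrapper below (grayImageFromImage: is not part of the original post) converts a UIImage to a grayscale UIImage:

// Hypothetical wrapper; assumes cvMatFromUIImage: and UIImageFromCVMat: above live in the same class.
- (UIImage *)grayImageFromImage:(UIImage *)image {
    cv::Mat inputMat = [self cvMatFromUIImage:image];       // RGBA Mat built with Core Graphics
    cv::Mat greyMat;
    cv::cvtColor(inputMat, greyMat, cv::COLOR_RGBA2GRAY);   // 4-channel RGBA -> 1-channel gray
    return [self UIImageFromCVMat:greyMat];                 // single-channel Mat is rendered with a gray color space
}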

  

// Image binarization: the adaptiveThreshold function (signature as declared in the OpenCV headers)
CV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst,
                                     double maxValue, int adaptiveMethod,
                                     int thresholdType, int blockSize, double C );
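A minimal usage sketch (the parameter values are illustrative, and gray is assumed to be an 8-bit single-channel Mat such as greyMat above):

cv::Mat binary;
cv::adaptiveThreshold(gray, binary,
                      255,                              // maxValue written to pixels that pass the threshold
                      cv::ADAPTIVE_THRESH_GAUSSIAN_C,   // local threshold from a Gaussian-weighted neighborhood
                      cv::THRESH_BINARY,
                      11,                               // blockSize: odd neighborhood size (illustrative)
                      2);                               // C: constant subtracted from the weighted mean (illustrative)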
                             

  

// Erosion / dilation: getStructuringElement builds the structuring element (kernel), returned as a Mat; erosion makes dark regions grow.
CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1));
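A short sketch of how the returned kernel is typically used with cv::erode and cv::dilate (binary is assumed to be the thresholded Mat from the previous step; kernel shape and size are illustrative):

cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
cv::Mat eroded, dilated;
cv::erode(binary, eroded, kernel);    // erosion shrinks bright regions, i.e. enlarges dark ones
cv::dilate(binary, dilated, kernel);  // dilation does the opposite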

  

// Find contours in an image and draw them
- (UIImage *)findImageBordleImage:(UIImage *)imag{
    cv::Mat src = [self cvMatFromUIImage:imag];
    cv::Mat src_gray;
    cv::cvtColor(src, src_gray, cv::COLOR_RGBA2GRAY);  // the Mat from cvMatFromUIImage: is RGBA
    cv::blur(src_gray, src_gray, cv::Size(3,3));
    cv::Mat canny_output;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::Canny(src_gray, canny_output, 100, 200,3);
    cv::findContours(canny_output, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
    cv::Mat drawing = cv::Mat::zeros(canny_output.size(), CV_8UC3 );
    cv::RNG rng(12345);

    for( size_t i = 0; i< contours.size(); i++ )
    {
        cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        cv::drawContours(drawing, contours, (int)i, color, 2, 8, hierarchy, 0, cv::Point() );
    }
    return [self UIImageFromCVMat:drawing];  // return the contour drawing rather than the raw Canny edges
}

  

// Blend (merge) two images with cv::addWeighted
- (UIImage *)image1:(UIImage *)imag1 image2:(UIImage *)imag2 with:(CGFloat)input{
    double beta;
    cv::Mat src1, src2, dst;
    src1 = [self cvMatFromUIImage:imag1];
    src2 = [self cvMatFromUIImage:imag2];
    beta = (1.0 - input);
    cv::addWeighted(src1, input, src2, beta, 0.0, dst);
    return  [self UIImageFromCVMat:dst];
}
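A hedged call-site example (firstImage and secondImage are placeholder names; cv::addWeighted requires both inputs to have the same size and channel count, so the two UIImages should have matching dimensions):

UIImage *blended = [self image1:firstImage image2:secondImage with:0.6];  // 60% of image1, 40% of image2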

  

Original post: https://www.cnblogs.com/neverMore-face/p/11629784.html