OPENCV(6): Corner Detection

Image features generally come in three types: edges, corners (interest points), and blobs (regions of interest). A corner is a local image feature and is widely used. Harris corner detection is a corner-extraction algorithm that works directly on the gray-scale image; it is very stable and especially accurate for L-shaped corners, but because it relies on Gaussian filtering it is relatively slow, corner information can be lost or shifted in position, and the extracted corners tend to cluster together.
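The code in this post actually demonstrates SURF keypoints rather than Harris, so for comparison here is a minimal Harris sketch using cv::cornerHarris, written in the same OpenCV 2.x style as the code below; the file name "zhang.jpg" and the blockSize / aperture / k / threshold values are illustrative choices only.

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main()
{
    // Input file name is an assumption, matching the SURF example below
    Mat src = imread( "zhang.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !src.data ) return -1;

    // Harris response map: blockSize = 2, Sobel aperture = 3, k = 0.04
    Mat dst = Mat::zeros( src.size(), CV_32FC1 );
    cornerHarris( src, dst, 2, 3, 0.04 );

    // Normalize the response so a fixed threshold can be applied
    Mat dst_norm;
    normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );

    // Mark every pixel whose normalized response exceeds the (arbitrary) threshold
    Mat show;
    cvtColor( src, show, CV_GRAY2BGR );
    for( int y = 0; y < dst_norm.rows; y++ )
        for( int x = 0; x < dst_norm.cols; x++ )
            if( dst_norm.at<float>(y, x) > 150 )
                circle( show, Point( x, y ), 4, Scalar( 0, 0, 255 ), 1 );

    imshow( "Harris corners", show );
    waitKey(0);
    return 0;
}

The first complete example below detects SURF keypoints in two images and draws them: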

#include "stdafx.h"

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
    /*
    if( argc != 3 )
    { readme(); return -1; } */


    Mat img_1 = imread( "zhang.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_2 = imread( "guo.jpg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !img_1.data || !img_2.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_1, keypoints_2;

    detector.detect( img_1, keypoints_1 );    // vector of detected keypoints
    detector.detect( img_2, keypoints_2 );

    //-- Draw keypoints
    Mat img_keypoints_1; Mat img_keypoints_2;

    drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    //-- Show detected (drawn) keypoints
    imshow("Keypoints 1", img_keypoints_1 );
    imshow("Keypoints 2", img_keypoints_2 );

    waitKey(0);

    return 0;
}

/** @function readme */
void readme()
{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }

In the next example the keypoints are detected with a SURF detector, the descriptors are also computed with SURF, and the matcher is FlannBased; finally findHomography is used to find the homography matrix and the object's corners are mapped into the scene to obtain the final target (the job normally done by perspectiveTransform, written out by hand in the code below).

The findHomography function computes the homography between two images; the result is a 3x3 matrix.
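Concretely, the homography maps a point (x, y) in the first image to (x', y') in the second image in homogeneous coordinates:

$$ s\begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix} = H\begin{pmatrix} x \\ y \\ 1 \end{pmatrix}, \qquad x' = \frac{h_{11}x + h_{12}y + h_{13}}{h_{31}x + h_{32}y + h_{33}}, \quad y' = \frac{h_{21}x + h_{22}y + h_{23}}{h_{31}x + h_{32}y + h_{33}}. $$

The division by the third row is exactly the 1/Z factor that the corner-mapping loop in the code below computes by hand.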

#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2calib3dcalib3d.hpp>

using namespace cv;

void readme();

int main( int argc, char** argv )
{

    /*
  if( argc != 3 )
  { return -1; }*/

  Mat img_1 = imread( "test1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( "test2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
  
  if( !img_1.data || !img_2.data )
  { return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );    // keypoint sets; the number of keypoints is now fixed

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;    // SURF descriptor extractor for the keypoints

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  /*
  //-- Step 3: Matching descriptor vectors with a brute force matcher
  BruteForceMatcher< L2<float> > matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  //-- Draw matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches ); 

  //-- Show detected matches
  imshow("Matches", img_matches );
  */

  
  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { 
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f 
", max_dist );
  printf("-- Min dist : %f 
", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist ) -- the distance threshold
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ )
  { 
      if( matches[i].distance < 2*min_dist )
        { 
            good_matches.push_back( matches[i]);    // filter at the matching stage, before drawing
        }
  }  

  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2, 
      good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), 
      vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 

  //-- Show detected matches
  imshow( "Good Matches", img_matches );

  //-- Localize the object from img_1 in img_2 
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( int i = 0; i < good_matches.size(); i++ )
  {
      //-- Get the keypoints from the good matches
      obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
      scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt ); 
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );    // estimate the 3x3 homography between the two images (RANSAC rejects outliers)

  //-- Get the corners from the image_1 ( the object to be "detected" )
  Point2f obj_corners[4] = { cvPoint(0,0), cvPoint( img_1.cols, 0 ), cvPoint( img_1.cols, img_1.rows ), cvPoint( 0, img_1.rows ) };
  Point scene_corners[4];

  //-- Map these corners in the scene ( image_2)
  for( int i = 0; i < 4; i++ )
  {
      double x = obj_corners[i].x; 
      double y = obj_corners[i].y;

      double Z = 1./( H.at<double>(2,0)*x + H.at<double>(2,1)*y + H.at<double>(2,2) );
      double X = ( H.at<double>(0,0)*x + H.at<double>(0,1)*y + H.at<double>(0,2) )*Z;
      double Y = ( H.at<double>(1,0)*x + H.at<double>(1,1)*y + H.at<double>(1,2) )*Z;
      scene_corners[i] = cvPoint( cvRound(X) + img_1.cols, cvRound(Y) );
  }  

  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  line( img_matches, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2 );
  line( img_matches, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 2 );
  line( img_matches, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 2 );
  line( img_matches, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 2 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );
  

  waitKey(0);

  return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }

findHomography uses the matched keypoints to recover the transformation between the images, and the object's corner points are then mapped into the scene; perspectiveTransform can perform this point mapping directly (the code above writes it out by hand).
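For reference, a minimal sketch of how perspectiveTransform could replace the manual corner-mapping loop, assuming the same img_1, img_matches and H as in the code above:

  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point2f( 0, 0 );
  obj_corners[1] = Point2f( (float)img_1.cols, 0 );
  obj_corners[2] = Point2f( (float)img_1.cols, (float)img_1.rows );
  obj_corners[3] = Point2f( 0, (float)img_1.rows );

  std::vector<Point2f> scene_corners(4);
  perspectiveTransform( obj_corners, scene_corners, H );    // applies H and divides by the third homogeneous coordinate

  // Shift by img_1.cols because img_matches shows img_1 and img_2 side by side
  Point2f offset( (float)img_1.cols, 0 );
  line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 2 );
  line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 2 );
  line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 2 );
  line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 2 );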

Reposted from: http://blog.csdn.net/yang_xian521/article/details/6901762

Original post: https://www.cnblogs.com/sprint1989/p/4071468.html