Find object (SIFT keypoint matching + RANSAC homography, OpenCV)

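The program below loads an object image (box.png) and a scene image (box_in_scene.png), detects SIFT keypoints in both, matches their descriptors with a brute-force matcher, estimates a homography with RANSAC, and draws the projected outline of the object on the match visualization (OpenCV 2.x API).
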
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"

using namespace cv;

int _tmain(int argc, _TCHAR* argv[])
{
    //const cv::Mat object = cv::imread("lenaface.JPG", 0); // alternative test pair
    //const cv::Mat image  = cv::imread("lena.jpg", 0);
    const cv::Mat object = cv::imread("box.png", 0);          // load as grayscale
    const cv::Mat image  = cv::imread("box_in_scene.png", 0); // load as grayscale
    if (object.empty() || image.empty())
        return -1; // images not found next to the executable

    // 1. Detect keypoints in the object and the scene image using SIFT
    cv::SiftFeatureDetector *detector;
    detector = new SiftFeatureDetector(0.04/5/2.0, 0.5); // detector parameters from the original (threshold, edgeThreshold in the 2.x API)
    //SurfFeatureDetector detector;
    std::vector<cv::KeyPoint> objectKeypoints;
    std::vector<cv::KeyPoint> imageKeypoints;
    detector->detect(object, objectKeypoints);
    // Optional: visualize the object keypoints
    //cv::Mat output_object;
    //cv::drawKeypoints(object, objectKeypoints, output_object);
    //cv::imwrite("output_object.jpg", output_object);
    //cv::namedWindow("object", CV_WINDOW_AUTOSIZE);
    //cv::imshow("object", output_object);
    detector->detect(image, imageKeypoints);
    // Optional: visualize the scene keypoints
    //cv::Mat output_image;
    //cv::drawKeypoints(image, imageKeypoints, output_image);
    //cv::imwrite("output_image.jpg", output_image);
    //cv::namedWindow("image", CV_WINDOW_AUTOSIZE);
    //cv::imshow("image", output_image);

    // 2. Compute SIFT descriptors at the detected keypoints
    Mat objectDescriptor, imageDescriptor;
    cv::SiftDescriptorExtractor descriptorExtractor;
    //cv::SurfDescriptorExtractor descriptorExtractor;
    descriptorExtractor.compute(object, objectKeypoints, objectDescriptor);
    descriptorExtractor.compute(image,  imageKeypoints,  imageDescriptor);

    // 3. Match descriptors of the two images (find pairs of corresponding keypoints)
    BruteForceMatcher< L2<float> > matcher; // FlannBasedMatcher is usually preferable for large descriptor sets
    vector<DMatch> matches;
    matcher.match(objectDescriptor, imageDescriptor, matches);
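
    // Sketch (not in the original): the FlannBasedMatcher alternative mentioned above.
    // FLANN uses approximate nearest-neighbour search and usually scales better than
    // brute force when there are many descriptors; it is a drop-in replacement here:
    //   cv::FlannBasedMatcher flannMatcher;
    //   flannMatcher.match(objectDescriptor, imageDescriptor, matches);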


    // Collect index pairs: queryIdx indexes the object keypoints, trainIdx the scene keypoints
    vector<int> pairOfsrcKP(matches.size()), pairOfdstKP(matches.size());
    for( size_t i = 0; i < matches.size(); i++ ){
        pairOfsrcKP[i] = matches[i].queryIdx;
        pairOfdstKP[i] = matches[i].trainIdx;
    }

    vector<Point2f> sPoints; KeyPoint::convert(objectKeypoints, sPoints,pairOfsrcKP);
    vector<Point2f> dPoints; KeyPoint::convert(imageKeypoints, dPoints,pairOfdstKP);

    // Matched pairs of 2D points; these will be used to estimate the homography
    Mat src2Dfeatures;
    Mat dst2Dfeatures;
    Mat(sPoints).copyTo(src2Dfeatures);
    Mat(dPoints).copyTo(dst2Dfeatures);

    // Estimate the homography mapping object points to scene points;
    // RANSAC fills outlierMask (1 = inlier, 0 = outlier) and rejects bad matches
    vector<uchar> outlierMask;
    double h[9];
    //CvMat H = cvMat(3, 3, CV_64F, h);
    Mat H = findHomography(src2Dfeatures, dst2Dfeatures, outlierMask, RANSAC, 3);
    // Draw only the RANSAC inliers (the mask passed as matchesMask suppresses outlier matches)
    Mat outimg;
    drawMatches(object, objectKeypoints, image, imageKeypoints, matches, outimg, Scalar::all(-1), Scalar::all(-1),
        reinterpret_cast<const vector<char>&>(outlierMask));
    //imshow("Matches: Src image (left) to dst (right)", outimg);

    
    // Project the object's corners into the scene using the estimated homography
    CvPoint src_corners[4] = {{0,0}, {object.cols,0}, {object.cols, object.rows}, {0, object.rows}};
    CvPoint dst_corners[4];

    // Flatten the 3x3 homography into h[] for the manual point projection below
    int k = 0;
    for(int i = 0; i < H.rows; i++)
        for(int j = 0; j < H.cols; j++)
            h[k++] = H.at<double>(i,j);

    for( int i = 0; i < 4; i++ )
    {
        // Apply the homography to (x, y, 1) and dehomogenize by the third coordinate
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }
    // Draw the projected box; x is shifted by object.cols because the scene image
    // sits to the right of the object image in the drawMatches canvas
    Mat lineBox(outimg);
    for( int i = 0; i < 4; i++ )
    {
        CvPoint r1 = dst_corners[i % 4];
        CvPoint r2 = dst_corners[(i + 1) % 4];
        line(lineBox, cvPoint(r1.x + object.cols, r1.y), cvPoint(r2.x + object.cols, r2.y), Scalar(0,125,255), 3, CV_AA);
    }
    imshow("Matches: Src image (left) to dst (right)", lineBox);
    waitKey();
    delete detector; // release the heap-allocated detector
    return 0;
}
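
As a side note, the manual corner projection above can be done in one call with cv::perspectiveTransform, which applies the homography and divides by the third coordinate for you. A minimal sketch, assuming the same object, H, and outimg variables from the program above:

    // Project the object corners with cv::perspectiveTransform instead of the manual loop
    std::vector<cv::Point2f> objCorners(4), sceneCorners(4);
    objCorners[0] = cv::Point2f(0, 0);
    objCorners[1] = cv::Point2f((float)object.cols, 0);
    objCorners[2] = cv::Point2f((float)object.cols, (float)object.rows);
    objCorners[3] = cv::Point2f(0, (float)object.rows);
    cv::perspectiveTransform(objCorners, sceneCorners, H); // H from findHomography above
    for (int i = 0; i < 4; i++)
        cv::line(outimg,
                 sceneCorners[i]           + cv::Point2f((float)object.cols, 0), // shift into the scene half
                 sceneCorners[(i + 1) % 4] + cv::Point2f((float)object.cols, 0),
                 cv::Scalar(0, 125, 255), 3);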
Original article: https://www.cnblogs.com/smartvessel/p/2222079.html