QingfengLee
1/13/2016 - 6:45 AM

OpenCV SIFT keypoint detection and matching sample. See http://www.cnblogs.com/cj695/p/4041478.html and http://read.pudn.com/downloads93/ebook/367060/SIFT_match_teaching_m for reference.

#include "opencvCmLib.h"
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <opencv2/core/core.hpp>
#include <limits>
using namespace std;
using namespace cv;

int main()
{
    // Object to be detected (query image)
    Mat imgObject = imread("../data/henan4.jpg", CV_LOAD_IMAGE_ANYCOLOR);
    // Scene image to search in
    Mat imgScene = imread("../data/jietu1.JPG", CV_LOAD_IMAGE_ANYCOLOR);
    if (imgObject.empty() || imgScene.empty())
    {
        cout << "failed to load input images" << endl;
        return -1;
    }
    
    //Step 1: detect keypoints with the SIFT detector
    SiftFeatureDetector detector;
    vector<KeyPoint> keypointsObject, keypointsScene;
    detector.detect(imgObject, keypointsObject);
    detector.detect(imgScene, keypointsScene);
    cout << "object--number of keypoints: " << keypointsObject.size() << endl;
    cout << "scene--number of keypoints: " << keypointsScene.size() << endl;
    // Optional: visualize the detected keypoints with drawKeypoints(), as in the second sample below.
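    // Note: the detector above uses the library defaults. If too many or too few
    // keypoints are found, SiftFeatureDetector can be constructed with explicit
    // parameters instead; a sketch using what should be the OpenCV 2.4 default values (tune as needed):
    //   SiftFeatureDetector detector(0 /*nfeatures, 0 = keep all*/, 3 /*nOctaveLayers*/,
    //                                0.04 /*contrastThreshold*/, 10 /*edgeThreshold*/, 1.6 /*sigma*/);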
    
    //Step 2: compute SIFT descriptors (feature vectors)
    SiftDescriptorExtractor extractor;
    Mat descriptorsObject, descriptorsScene;
    extractor.compute(imgObject, keypointsObject, descriptorsObject);
    extractor.compute(imgScene, keypointsScene, descriptorsScene);

    //Step 3: match descriptors with the FLANN-based matcher
    FlannBasedMatcher matcher;
    vector< DMatch > allMatches;
    matcher.match(descriptorsObject, descriptorsScene, allMatches);
    cout << "number of matches before filtering: " << allMatches.size() << endl;

    //-- Compute the minimum and maximum descriptor distance over all matches
    double maxDist = std::numeric_limits<double>::min();
    double minDist = std::numeric_limits<double>::max();
    for (size_t i = 0; i < allMatches.size(); i++)
    {
        double dist = allMatches[i].distance;
        if (dist < minDist)
            minDist = dist;
        if (dist > maxDist)
            maxDist = dist;
    }
    printf("    max dist : %f \n", maxDist);
    printf("    min dist : %f \n", minDist);

    //-- Filter the matches, keeping only the good ones (criterion used here: distance < 2 * minDist)
    vector< DMatch > goodMatches;
    for (size_t i = 0; i < allMatches.size(); i++)
    {
        if (allMatches[i].distance < 2 * minDist)
            goodMatches.push_back(allMatches[i]);
    }
    cout << "number of matches after filtering: " << goodMatches.size() << endl;
    if (goodMatches.size() < 4) {
        cout << "good match points less 4" << endl;
        return 0;
    }
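
    // Alternative filtering (not used in this sample): Lowe's ratio test via knnMatch,
    // which keeps a match only when its best distance is clearly smaller than the
    // second-best one. A rough sketch, with the 0.7 ratio chosen as an assumption:
    //
    //   vector< vector<DMatch> > knnMatches;
    //   matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
    //   vector<DMatch> ratioMatches;
    //   for (size_t i = 0; i < knnMatches.size(); i++)
    //       if (knnMatches[i].size() == 2 &&
    //           knnMatches[i][0].distance < 0.7f * knnMatches[i][1].distance)
    //           ratioMatches.push_back(knnMatches[i][0]);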

    //-- Draw the filtered matches
    Mat resultImg;
    drawMatches(imgObject, keypointsObject, imgScene, keypointsScene,
        goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS // do not draw unmatched keypoints
        );

    //-- Print the keypoint correspondence of each good match
    for (size_t i = 0; i < goodMatches.size(); i++)
        printf("   good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", (int)i,
        goodMatches[i].queryIdx, goodMatches[i].trainIdx);


    ///-- Step 4: find the perspective transform (homography) with findHomography
    vector<Point2f> object;
    vector<Point2f> scene;
    for (size_t i = 0; i < goodMatches.size(); i++)
    {
        //-- Collect the keypoint coordinates of the good matches: each match stores the indices of a matched keypoint pair
        //-- e.g. goodMatches[i].queryIdx and goodMatches[i].trainIdx index the object and scene keypoints of match i
        object.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
        scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
    }
    Mat H = findHomography(object, scene, CV_RANSAC);
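    // findHomography also accepts a RANSAC reprojection threshold and an output
    // inlier mask; a sketch (3.0 should be the documented default threshold) for the
    // case where the number of RANSAC inliers is of interest:
    //
    //   Mat inlierMask;
    //   Mat H2 = findHomography(object, scene, CV_RANSAC, 3.0, inlierMask);
    //   cout << "RANSAC inliers: " << countNonZero(inlierMask) << endl;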

    ///-- Step 5: map the object corners with perspectiveTransform to locate the object in the scene
    std::vector<Point2f> objCorners(4);
    objCorners[0] = Point2f(0, 0);
    objCorners[1] = Point2f((float)imgObject.cols, 0);
    objCorners[2] = Point2f((float)imgObject.cols, (float)imgObject.rows);
    objCorners[3] = Point2f(0, (float)imgObject.rows);
    std::vector<Point2f> sceneCorners(4);
    perspectiveTransform(objCorners, sceneCorners, H);

    //-- Draw lines between the four corners of the detected object (offset by imgObject.cols because drawMatches places the scene image to the right of the object image)
    line(resultImg, sceneCorners[0] + Point2f(imgObject.cols, 0), sceneCorners[1] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
    line(resultImg, sceneCorners[1] + Point2f(imgObject.cols, 0), sceneCorners[2] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
    line(resultImg, sceneCorners[2] + Point2f(imgObject.cols, 0), sceneCorners[3] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
    line(resultImg, sceneCorners[3] + Point2f(imgObject.cols, 0), sceneCorners[0] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);

    //-- Show the detection result
    imshow("detection result", resultImg);

    line(imgScene, sceneCorners[0], sceneCorners[1], Scalar(0, 255, 0), 4);
    line(imgScene, sceneCorners[1], sceneCorners[2], Scalar(0, 255, 0), 4);
    line(imgScene, sceneCorners[2], sceneCorners[3], Scalar(0, 255, 0), 4);
    line(imgScene, sceneCorners[3], sceneCorners[0], Scalar(0, 255, 0), 4);
    imshow("imgScene", imgScene);

    waitKey();
    return 0;
}
#include "opencvCmLib.h"

#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>

using namespace std;
using namespace cv;

int main()
{
    Mat imgInput1 = imread("beaver.png", CV_LOAD_IMAGE_ANYCOLOR);
    Mat imgInput2 = imread("beaver_xform.png", CV_LOAD_IMAGE_ANYCOLOR);
    if (imgInput1.empty() || imgInput2.empty())
    {
        cout << "failed to load input images" << endl;
        return -1;
    }

    //Step 1: detect keypoints with the SIFT detector
    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints1, keypoints2;
    detector.detect(imgInput1, keypoints1);
    detector.detect(imgInput2, keypoints2);
    cout << "input1--number of keypoints: " << keypoints1.size() << endl;
    cout << "input2--number of keypoints: " << keypoints2.size() << endl;
    Mat output1, output2;
    drawKeypoints(imgInput1, keypoints1, output1);
    imshow("output1",output1);
    drawKeypoints(imgInput2, keypoints2, output2);
    imshow("output2", output2);

    //Step 2: compute SIFT descriptors (feature vectors)
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(imgInput1, keypoints1, descriptors1);
    extractor.compute(imgInput2, keypoints2, descriptors2);

    //Step 3: match the keypoints (brute-force matching on descriptors)
    BruteForceMatcher<L2<float>> matcher;
    vector<DMatch> matches;
    Mat img_matches;
    matcher.match(descriptors1, descriptors2, matches);

    drawMatches(imgInput1, keypoints1, imgInput2, keypoints2, matches, img_matches);
    imshow("matches", img_matches);

    waitKey();
    return 0;
}
#ifndef _OPENCV_CMLIB_H_
#define _OPENCV_CMLIB_H_

#ifdef _DEBUG
#define lnkLIB(name) name "d"
#else
#define lnkLIB(name) name
#endif

#include <opencv2/opencv.hpp> 
#define CV_VERSION_ID CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION)
#define cvLIB(name) lnkLIB("opencv_" name CV_VERSION_ID)
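// For example, with OpenCV 2.4.9 in a Debug build, cvLIB("core") expands to
// "opencv_core249d", so the #pragma directives below link opencv_core249d.lib and so on.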

#pragma comment(lib, cvLIB("core"))
#pragma comment(lib, cvLIB("imgproc"))
#pragma comment(lib, cvLIB("highgui"))
#pragma comment(lib, cvLIB("contrib"))
#pragma comment(lib, cvLIB("nonfree"))
#pragma comment(lib, cvLIB("features2d"))
#pragma comment(lib, cvLIB("legacy"))

#endif //_OPENCV_CMLIB_H_