OpenCV:如何使用AffineTransformer

问题描述 投票:0回答:1

您好,感谢您的帮助。

我想在OpenCV中测试基于形状的匹配,匹配部分已经完成了。为了找到旋转后的形状,我认为 AffineTransformer 类应该是正确的选择。由于我不了解匹配在内部是如何工作的,如果有人能提供描述该过程的链接就太好了。

正如 Shawshank 所提到的,我下面的代码会抛出断言失败(assertion failed)错误,因为传递给 estimateTransformation 函数的 matches 变量是空的。有没有人知道如何正确使用这个函数——它到底是做什么的?

#include <algorithm>
#include <iostream>
#include <random>
#include <string>

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Rotate `src` by `angle` degrees around its centre, enlarging the output
// canvas so the whole rotated image stays visible (no corners clipped).
//
// @param src    input image
// @param dst    output image holding the fully visible rotated input
// @param angle  rotation angle in degrees (counter-clockwise)
// @return       true on success, false if `src` is empty
bool rotateImage(Mat src, Mat &dst, double angle)
{
    if (src.empty())
        return false;  // nothing to rotate; warpAffine would assert

    // Rotation matrix about the image centre (float literals avoid
    // the double -> Point2f narrowing of the original).
    cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);

    // Bounding rectangle of the rotated image determines the new canvas size.
    cv::Rect bbox = cv::RotatedRect(center, src.size(), angle).boundingRect();

    // Shift the transform so the rotated content is centred in the new canvas.
    rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
    rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;

    cv::warpAffine(src, dst, rot, bbox.size());
    return true;  // was `return 1;` — use a bool literal for a bool function
}


// Collect all contour points of a binary image and return exactly `n` of
// them, randomly sampled. If the image has fewer than `n` contour points,
// existing points are cycled to pad the pool before sampling.
//
// @param image  binary input image (findContours modifies-compatible)
// @param n      number of points to return (default 300)
// @return       n randomly chosen contour points, or an empty vector if the
//               image contains no contours at all
static vector<Point> sampleContour( const Mat& image, int n=300 )
{
    vector<vector<Point>> contours;
    vector<Point> all_points;
    findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++)
        for (size_t j = 0; j < contours[i].size(); j++)
            all_points.push_back(contours[i][j]);

    // BUG FIX: if the image yields no contour points, the padding loop
    // below would index an empty vector (undefined behaviour).
    if (all_points.empty())
        return vector<Point>();

    // Pad by cycling through existing points until at least n are available.
    size_t dummy = 0;
    while (all_points.size() < static_cast<size_t>(n))
        all_points.push_back(all_points[dummy++]);

    // shuffle — std::random_shuffle was deprecated in C++14 and removed in
    // C++17; std::shuffle with an explicit engine is the modern equivalent.
    std::mt19937 rng(std::random_device{}());
    std::shuffle(all_points.begin(), all_points.end(), rng);

    // Keep the first n shuffled points.
    return vector<Point>(all_points.begin(), all_points.begin() + n);
}


int main(void)
{
    Mat img1, img2;
    vector<Point> img1Points, img2Points;
    float distSC, distHD;

    // read images
    string img1Path = "testimage.jpg";
    img1 = imread(img1Path, IMREAD_GRAYSCALE);
    rotateImage(img1, img2, 45);
    imshow("original", img1);
    imshow("transformed", img2);
    waitKey();

    // Contours
    img1Points = sampleContour(img1);
    img2Points = sampleContour(img2);

    //Calculate Distances
    Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
    Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();

    distSC = mysc->computeDistance( img1Points, img2Points );
    distHD = myhd -> computeDistance( img1Points, img2Points );

    cout << distSC << endl << distHD << endl;
    vector<DMatch> matches;
    Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
    transformerHD -> estimateTransformation(img1Points, img2Points, matches);
    return 0;
}
opencv transformation matching shapes
1个回答
2
投票

我在2D图像上使用了AffineTransformer类。下面是基本代码,可以让您了解它的作用。

// My OpenCv AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>
using namespace cv;
using namespace std;

// Hard-coded demo correspondences for a 500 x 500 image: source point i
// (sourcePx[i], sourcePy[i]) maps to target point i (tgtPx[i], tgtPy[i]).
int sourcePx[]={154,155,159,167,182,209,238,265,295,316};
int sourcePy[]={190,222,252,285,314,338,344,340,321,290};
int tgtPx[]={120,127,137,150,188,230,258,285,305,313};
int tgtPy[]={207,245,275,305,336,345,342,332,305,274};
// Derive the count from the array itself (was a hard-coded 10) so it can
// never drift out of sync when points are added or removed.
int arrSize = sizeof(sourcePx) / sizeof(sourcePx[0]);

int main()
{
    // Prepare 'vector of points' from above hardcoded points
    int sInd=0, eInd=arrSize;
    vector<Point2f> sourceP; for(int i=sInd; i<eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    vector<Point2f> tgtP; for(int i=sInd; i<eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));

    // Create object of AffineTransformer
    bool fullAffine = true; // change its value and see difference in result
    auto aft = cv::createAffineTransformer(fullAffine);

    // Prepare vector<cv::DMatch> - this is just mapping of corresponding points indices
    std::vector<cv::DMatch> matches;
    for(int i=0; i<sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));

    // Read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;

    // estimate points transformation
    aft->estimateTransformation(sourceP, tgtP, matches);
    // apply transformation
    aft->applyTransformation(sourceP, tgtP);
    // warp image
    aft->warpImage(srcImg, tgtImg);
    // show generated output
    imshow("warped output", tgtImg);
    waitKey(0);

    return 0;
}
© www.soinside.com 2019 - 2024. All rights reserved.