OpenCV: calibrateHandEye function gives wrong results

Problem description

I am working on a robotics application in which a camera is fixed to the robot gripper. To calculate the transformation matrix between the camera and the gripper, Hcg, I am using the calibrateHandEye function newly added in OpenCV 4.1.0.

I took 10 pictures of a chessboard with the camera mounted on the gripper, recording the robot pose at each shot.

The code I am writing:

// My_handeye.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include "pch.h" // precompiled header; under MSVC it must be the first include, otherwise everything above it is ignored

#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdio>

#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;
using namespace std;

Mat eulerAnglesToRotationMatrix(Vec3f &theta);
Vec3f rotationMatrixToEulerAngles(Mat &R);
float rad2deg(float radian);
float deg2rad(float degree);

int main()
{

    // Camera calibration information

    std::vector<double> distortionCoefficients(5);  // camera distortion
    distortionCoefficients[0] = 2.4472856611074989e-01;
    distortionCoefficients[1] = -8.1042032574246325e-01;
    distortionCoefficients[2] = 0;
    distortionCoefficients[3] = 0;
    distortionCoefficients[4] = 7.8769462320821060e-01;

    double f_x = 1.3624172121852105e+03; // Focal length in x axis
    double f_y = 1.3624172121852105e+03; // Focal length in y axis (usually the same?)
    double c_x = 960; // Camera principal point x
    double c_y = 540; // Camera principal point y

    cv::Mat cameraMatrix(3, 3, CV_32FC1);
    cameraMatrix.at<float>(0, 0) = f_x;
    cameraMatrix.at<float>(0, 1) = 0.0;
    cameraMatrix.at<float>(0, 2) = c_x;
    cameraMatrix.at<float>(1, 0) = 0.0;
    cameraMatrix.at<float>(1, 1) = f_y;
    cameraMatrix.at<float>(1, 2) = c_y;
    cameraMatrix.at<float>(2, 0) = 0.0;
    cameraMatrix.at<float>(2, 1) = 0.0;
    cameraMatrix.at<float>(2, 2) = 1.0;

    Mat rvec(3, 1, CV_32F), tvec(3, 1, CV_32F);
    //

    std::vector<Mat> R_gripper2base;
    std::vector<Mat> t_gripper2base;
    std::vector<Mat> R_target2cam;
    std::vector<Mat> t_target2cam;
    Mat R_cam2gripper = (Mat_<float>(3, 3));
    Mat t_cam2gripper = (Mat_<float>(3, 1));

    vector<String> fn;
    glob("images/*.bmp", fn, false);

    vector<Mat> images;
    size_t num_images = fn.size(); //number of bmp files in images folder
    Size patternsize(6, 8); //number of interior corners per chessboard row and column
    vector<Point2f> centers; //this will be filled by the detected corners
    float cell_size = 30;
    vector<Point3f> obj_points;

    R_gripper2base.reserve(num_images);
    t_gripper2base.reserve(num_images);
    R_target2cam.reserve(num_images);
    t_target2cam.reserve(num_images);

    for (int i = 0; i < patternsize.height; ++i)
        for (int j = 0; j < patternsize.width; ++j)
            obj_points.push_back(Point3f(float(j*cell_size),
                float(i*cell_size), 0.f));

    for (size_t i = 0; i < num_images; i++)
        images.push_back(imread(fn[i]));

    Mat frame;

    for (size_t i = 0; i < num_images; i++)
    {
        frame = imread(fn[i]); //source image
        bool patternfound = findChessboardCorners(frame, patternsize, centers);
        if (patternfound)
        {
            drawChessboardCorners(frame, patternsize, Mat(centers), patternfound);
            //imshow("window", frame);
            //int key = cv::waitKey(0) & 0xff;
            solvePnP(Mat(obj_points), Mat(centers), cameraMatrix, distortionCoefficients, rvec, tvec);

            Mat R;
            Rodrigues(rvec, R); // R is 3x3
            R_target2cam.push_back(R);
            t_target2cam.push_back(tvec);
            Mat T = Mat::eye(4, 4, R.type()); // T is 4x4
            T(Range(0, 3), Range(0, 3)) = R * 1; // the MatExpr assignment copies R into the ROI of T (a plain "= R" would only reassign the temporary header)
            T(Range(0, 3), Range(3, 4)) = tvec * 1; // likewise copies tvec into the last column

            cout << "T = " << endl << " " << T << endl << endl;

        }
        cout << patternfound << endl;
    }

    Vec3f theta_01{ deg2rad(-153.61), deg2rad(8.3),   deg2rad(-91.91) };
    Vec3f theta_02{ deg2rad(-166.71), deg2rad(3.04),  deg2rad(-93.31) };
    Vec3f theta_03{ deg2rad(-170.04), deg2rad(24.92), deg2rad(-88.29) };
    Vec3f theta_04{ deg2rad(-165.71), deg2rad(24.68), deg2rad(-84.85) };
    Vec3f theta_05{ deg2rad(-160.18), deg2rad(-15.94),deg2rad(-56.24) };
    Vec3f theta_06{ deg2rad(175.68),  deg2rad(10.95), deg2rad(180) };
    Vec3f theta_07{ deg2rad(175.73),  deg2rad(45.78), deg2rad(-179.92) };
    Vec3f theta_08{ deg2rad(-165.34), deg2rad(47.37), deg2rad(-166.25) };
    Vec3f theta_09{ deg2rad(-165.62), deg2rad(17.95), deg2rad(-166.17) };
    Vec3f theta_10{ deg2rad(-151.99), deg2rad(-14.59),deg2rad(-94.19) };

    Mat robot_rot_01 = eulerAnglesToRotationMatrix(theta_01);
    Mat robot_rot_02 = eulerAnglesToRotationMatrix(theta_02);
    Mat robot_rot_03 = eulerAnglesToRotationMatrix(theta_03);
    Mat robot_rot_04 = eulerAnglesToRotationMatrix(theta_04);
    Mat robot_rot_05 = eulerAnglesToRotationMatrix(theta_05);
    Mat robot_rot_06 = eulerAnglesToRotationMatrix(theta_06);
    Mat robot_rot_07 = eulerAnglesToRotationMatrix(theta_07);
    Mat robot_rot_08 = eulerAnglesToRotationMatrix(theta_08);
    Mat robot_rot_09 = eulerAnglesToRotationMatrix(theta_09);
    Mat robot_rot_10 = eulerAnglesToRotationMatrix(theta_10);

    const Mat robot_tr_01 = (Mat_<float>(3, 1) << 781.2, 338.59, 903.48);
    const Mat robot_tr_02 = (Mat_<float>(3, 1) << 867.65, 382.52, 884.42);
    const Mat robot_tr_03 = (Mat_<float>(3, 1) << 856.91, 172.99, 964.61);
    const Mat robot_tr_04 = (Mat_<float>(3, 1) << 748.81, 146.75, 1043.29);
    const Mat robot_tr_05 = (Mat_<float>(3, 1) << 627.66, 554.08, 920.85);
    const Mat robot_tr_06 = (Mat_<float>(3, 1) << 715.06, 195.96, 889.38);
    const Mat robot_tr_07 = (Mat_<float>(3, 1) << 790.9, 196.29, 1117.38);
    const Mat robot_tr_08 = (Mat_<float>(3, 1) << 743.5, 283.93, 1131.92);
    const Mat robot_tr_09 = (Mat_<float>(3, 1) << 748.9, 288.19, 910.58);
    const Mat robot_tr_10 = (Mat_<float>(3, 1) << 813.18, 400.44, 917.16);

    R_gripper2base.push_back(robot_rot_01);
    R_gripper2base.push_back(robot_rot_02);
    R_gripper2base.push_back(robot_rot_03);
    R_gripper2base.push_back(robot_rot_04);
    R_gripper2base.push_back(robot_rot_05);
    R_gripper2base.push_back(robot_rot_06);
    R_gripper2base.push_back(robot_rot_07);
    R_gripper2base.push_back(robot_rot_08);
    R_gripper2base.push_back(robot_rot_09);
    R_gripper2base.push_back(robot_rot_10);

    t_gripper2base.push_back(robot_tr_01);
    t_gripper2base.push_back(robot_tr_02);
    t_gripper2base.push_back(robot_tr_03);
    t_gripper2base.push_back(robot_tr_04);
    t_gripper2base.push_back(robot_tr_05);
    t_gripper2base.push_back(robot_tr_06);
    t_gripper2base.push_back(robot_tr_07);
    t_gripper2base.push_back(robot_tr_08);
    t_gripper2base.push_back(robot_tr_09);
    t_gripper2base.push_back(robot_tr_10);

    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);

    Vec3f R_cam2gripper_r = rotationMatrixToEulerAngles(R_cam2gripper);

    cout << "R_cam2gripper = " << endl << " " << R_cam2gripper << endl << endl;
    cout << "R_cam2gripper_r = " << endl << " " << R_cam2gripper_r << endl << endl;
    cout << "t_cam2gripper = " << endl << " " << t_cam2gripper << endl << endl;
}

Mat eulerAnglesToRotationMatrix(Vec3f &theta)
{
    // Calculate rotation about x axis
    Mat R_x = (Mat_<double>(3, 3) <<
        1, 0, 0,
        0, cos(theta[0]), -sin(theta[0]),
        0, sin(theta[0]), cos(theta[0])
        );

    // Calculate rotation about y axis
    Mat R_y = (Mat_<double>(3, 3) <<
        cos(theta[1]), 0, sin(theta[1]),
        0, 1, 0,
        -sin(theta[1]), 0, cos(theta[1])
        );

    // Calculate rotation about z axis
    Mat R_z = (Mat_<double>(3, 3) <<
        cos(theta[2]), -sin(theta[2]), 0,
        sin(theta[2]), cos(theta[2]), 0,
        0, 0, 1);


    // Combined rotation matrix
    Mat R = R_z * R_y * R_x;

    return R;

}

float rad2deg(float radian) {
    return radian * (180.0 / CV_PI); // CV_PI from opencv2/core avoids the truncated 3.14159 constant
}

float deg2rad(float degree) {
    return degree * (CV_PI / 180.0);
}

// Checks if a matrix is a valid rotation matrix.
bool isRotationMatrix(Mat &R)
{
    Mat Rt;
    transpose(R, Rt);
    Mat shouldBeIdentity = Rt * R;
    Mat I = Mat::eye(3, 3, shouldBeIdentity.type());

    return  norm(I, shouldBeIdentity) < 1e-6;

}

// Calculates rotation matrix to euler angles
// The result is the same as MATLAB except the order
// of the euler angles ( x and z are swapped ).
Vec3f rotationMatrixToEulerAngles(Mat &R)
{

    assert(isRotationMatrix(R));

    float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0));

    bool singular = sy < 1e-6; // true when the matrix is close to gimbal lock

    float x, y, z;
    if (!singular)
    {
        x = atan2(R.at<double>(2, 1), R.at<double>(2, 2));
        y = atan2(-R.at<double>(2, 0), sy);
        z = atan2(R.at<double>(1, 0), R.at<double>(0, 0));
    }
    else
    {
        x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1));
        y = atan2(-R.at<double>(2, 0), sy);
        z = 0;
    }
    return Vec3f(x, y, z);

}
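As a quick sanity check on the two conversion helpers above (a sketch; it only verifies that both functions assume the same R = R_z * R_y * R_x composition, not that the robot uses it):

    // Round-trip check: angles -> matrix -> angles should reproduce the input
    Vec3f theta_in{ deg2rad(-153.61f), deg2rad(8.3f), deg2rad(-91.91f) };
    Mat R_check = eulerAnglesToRotationMatrix(theta_in);
    Vec3f theta_out = rotationMatrixToEulerAngles(R_check);
    cout << "in:  " << theta_in << endl;
    cout << "out: " << theta_out << endl;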

The results the function gives me are the following:

R_cam2gripper =
 [0.3099803593003124, -0.8923086952824562, -0.3281727733547833;
 0.7129271761196039, 0.4465219155360299, -0.5406967916458927;
 0.6290047840821058, -0.0663579028402444, 0.7745641421680119]

R_cam2gripper_r =
 [-0.0854626, -0.680272, 1.16065]

t_cam2gripper =
 [-35.02063730299775;
 -74.80633768251272;
 -307.6725851251873]

I have "good" results obtained with other software. Using them, the robot reaches the exact point that I pick in the camera (it is a 3D camera, so I can get x, y, z in the camera frame), so those values are certainly correct; the trouble is reproducing them with the OpenCV function.

Sorry for the long introduction to my problem. Does anyone see why the solution is not what it should be? My guess is that I have a problem understanding or converting the angles, but I cannot find a way to pin it down. Any tip would be welcome!
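One way to test that guess without any extra software is a pairwise consistency check. Hand-eye calibration solves AX = XB, where A is the relative gripper motion between two stations and B is the corresponding relative target motion seen by the camera; A and B are conjugate rotations, so their rotation angles must be equal. A minimal sketch of this check, assuming the R_gripper2base / R_target2cam vectors are filled as in the code above (large mismatches would point to a misread robot rotation convention):

    for (size_t i = 0; i < R_gripper2base.size(); i++)
    {
        for (size_t j = i + 1; j < R_gripper2base.size(); j++)
        {
            // Relative gripper motion and relative target motion between stations i and j
            Mat A = R_gripper2base[j].t() * R_gripper2base[i];
            Mat B = R_target2cam[j] * R_target2cam[i].t();
            Mat rvecA, rvecB;
            Rodrigues(A, rvecA); // the norm of a rotation vector is the rotation angle
            Rodrigues(B, rvecB);
            cout << "pair (" << i << ", " << j << "): angle(A) = " << norm(rvecA)
                << " rad, angle(B) = " << norm(rvecB) << " rad" << endl;
        }
    }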

c++ opencv computer-vision camera-calibration robotics
1 Answer

I actually managed to solve this problem. The general idea was correct, but:

  1. I was not interpreting correctly the rotation-vector notation the robot reports; the raw values have to be multiplied by a conversion factor before use (see the sketch after this list).
  2. I wrote a new program that extracts the matrices the algorithm needs directly from the robot and the pictures, and writes those values to a YML file.
  3. The CALIB_HAND_EYE_TSAI method was not giving me correct values, but the other four methods all seem to converge towards the actual values.
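A minimal sketch of the three points above (the scale factor k, the YML file name, and the raw pose triple are hypothetical; the factor depends on the robot controller, e.g. pi/180 if it reports the rotation vector in degrees):

    // 1. Interpret the robot pose as a rotation vector (axis-angle), not Euler
    //    angles, scaled by a robot-dependent factor k (hypothetical value here).
    double k = CV_PI / 180.0;                // e.g. if the controller reports degrees
    Vec3d rot_raw(-153.61, 8.3, -91.91);     // raw rotation triple from the robot
    Mat robot_rot_01;
    Rodrigues(Mat(rot_raw * k), robot_rot_01); // rotation vector -> 3x3 matrix

    // 2. Dump the inputs to a YML file with cv::FileStorage.
    FileStorage fs("handeye_input.yml", FileStorage::WRITE); // hypothetical file name
    for (size_t i = 0; i < R_gripper2base.size(); i++)
    {
        fs << cv::format("R_gripper2base_%d", (int)i) << R_gripper2base[i];
        fs << cv::format("t_gripper2base_%d", (int)i) << t_gripper2base[i];
    }
    fs.release();

    // 3. Run all five methods and compare the outputs.
    vector<HandEyeCalibrationMethod> methods = {
        CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD,
        CALIB_HAND_EYE_ANDREFF, CALIB_HAND_EYE_DANIILIDIS };
    for (auto method : methods)
    {
        Mat R_cam2gripper, t_cam2gripper;
        calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
            R_cam2gripper, t_cam2gripper, method);
        cout << "method " << (int)method << ": t_cam2gripper = " << t_cam2gripper << endl;
    }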

Anyway, thank you for your help. I am still stuck on getting more precision out of the algorithm, but that is another question.
