How to find the robot TCP in the image using calibrateHandEye


I am currently working on an application for a robot with a camera mounted on its end effector. The goal is to compute where the robot's TCP appears in the camera frame. I am using OpenCV in C++, and the robot is a UR5 from Universal Robots.

My plan:

  1. Collect several (8) data sets:
  • robot pose (XYZ in meters, straight from the robot controller)
  • robot rotation (rx, ry, rz in radians, straight from the robot controller)
  • a photo of the calibration pattern for each step

  2. Run calibrateCamera on the set of images to get a tvec and rvec for every step

  3. Run calibrateHandEye (sketched below)

  • for t_gripper2base I use the robot poses
  • for R_gripper2base I use the robot rotations
  • for t_target2cam I use the tvecs from calibrateCamera
  • for R_target2cam I use the rvecs from calibrateCamera
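
A minimal sketch of step 3, assuming all four pose lists are filled, equally long, and index-aligned (OpenCV 4.1 or newer; variable names as in the full code below):

Mat R_cam2gripper, t_cam2gripper;
// Rotations may be passed as 3x3 matrices or as 3x1 rotation vectors;
// the default method is Tsai's hand-eye calibration.
calibrateHandEye(R_gripper2base, t_gripper2base,
                 R_target2cam, t_target2cam,
                 R_cam2gripper, t_cam2gripper);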

I seem to be getting correct values (I measured the distance from the camera to the TCP, and t_cam2gripper appears to be right):

Translation vector target to cam: 
[-0.0001052803107026547;
 -0.0780872727019615;
 -0.1243323507069755]

Rotation matrix target to cam: 
[0.9999922523048892, 0.002655868335207422, -0.002905459271957709;
 -0.001229768871633805, 0.9119334002787367, 0.4103363999508009;
 0.003739384804660116, -0.4103296477461107, 0.9119296010009958]

The formula to project a point from TCP coordinates into the image should look like this:

(u, v, w) = C * T * (X, Y, Z, 1)

But even after dividing by w, my values are still way off (they should be around (600, 600)):
Actual image position vector homogenized: 
[1778.542462313536;
 -1626.72483032188;
 1]
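
For reference, the projection written out by hand with OpenCV Mats looks roughly like this (a sketch using names from the full listing below, including its inverted transform transformationMatInv4x4; note that the 3x3 camera matrix can only be multiplied with the top 3x4 rows of a 4x4 transform, and that projectPoints would additionally apply lens distortion, which this manual version omits):

Mat T34 = transformationMatInv4x4(Rect(0, 0, 4, 3));  // top 3x4 [R|t] rows
Mat tcpHom = (Mat_<double>(4, 1) << 0, 0, 0, 1);      // (X, Y, Z, 1)
Mat uvw = defaultCameraMatrix * T34 * tcpHom;         // homogeneous (u, v, w)
Point2d pixel(uvw.at<double>(0) / uvw.at<double>(2),  // divide by w
              uvw.at<double>(1) / uvw.at<double>(2));
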
#include <QCoreApplication>

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    // Default values from a previous intrinsic camera calibration
    Mat defaultCameraMatrix = (Mat_<double>(3, 3) <<
        1739.3749, 0,         639.5,
        0,         1739.3749, 479.5,
        0,         0,         1);
    Mat defaultDistortion = (Mat_<double>(1, 5) << -0.165909, 0.303675, 0, 0, 0);

    vector<Mat> R_gripper2base, t_gripper2base, R_target2cam, t_target2cam;
    // Gripper (TCP) positions in the robot base frame, one per pose
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.193139, 0.463823, -0.025));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.193139, 0.463823, -0.025));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.21273, 0.4426, -0.0288));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.17213, 0.4103, 0.014));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.13724, 0.45, 0.02978));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.1655, 0.478, -0.0211));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.17018, 0.46458, -0.03761));
    t_gripper2base.push_back((Mat_<double>(3, 1) << -0.193139, 0.463823, 0.025));

    // Gripper (TCP) rotations as axis-angle rotation vectors (UR convention)
    R_gripper2base.push_back((Mat_<double>(3, 1) << -3.14159, 0.00, 0.00719124));
    R_gripper2base.push_back((Mat_<double>(3, 1) << -2.06, -2.36, 0.03));
    R_gripper2base.push_back((Mat_<double>(3, 1) << 2.39, 1.86, 0.49));
    R_gripper2base.push_back((Mat_<double>(3, 1) << -2.66, 0.08, 0.09));
    R_gripper2base.push_back((Mat_<double>(3, 1) << -2.84, 0.19, 0.69));
    R_gripper2base.push_back((Mat_<double>(3, 1) << 2.1, -2.34, -0.02));
    R_gripper2base.push_back((Mat_<double>(3, 1) << 1.66, -2.53, -0.23));
    R_gripper2base.push_back((Mat_<double>(3, 1) << -3.14159, 0.00, 0.00719124));

    //    for(int i = 0; i < t_gripper2base.size(); i++)
    //    {
    //        cout << t_gripper2base[i] << endl << endl;
    //    }

    //    for(int i = 0; i < R_gripper2base.size(); i++)
    //    {
    //        cout << R_gripper2base[i] << endl << endl;
    //    }

    vector<String> fileNames;
    glob("PATH*.png", fileNames, false); // directory of images
    vector<vector<Point2f>> corners; // filled only for images where the pattern is found
    Mat chessboardImg, chessboardImgGray;

    vector<Point3f> objp;
    vector<vector<Point3f>> worldCoordinates;
    int checkerBoard[2] = {9,6};    // squares; the 9x6 board has 8x5 inner corners
    double fieldSize = 0.008;       // checkerboard square size in meters

    Mat cameraMatrixHandEye, distortionHandEye;
    vector<Mat> rvecs, tvecs;

    // Chessboard corner positions in the target frame (z = 0 plane)
    for(int i = 1; i < checkerBoard[1]; i++){
        for(int j = 1; j < checkerBoard[0]; j++){
            objp.push_back(Point3f(j*fieldSize, i*fieldSize, 0));
        }
    }

    for(size_t i = 0; i < fileNames.size(); i++)
    {
        chessboardImg = imread(fileNames[i]);
        cvtColor(chessboardImg, chessboardImgGray, COLOR_BGR2GRAY);

        vector<Point2f> imgCorners;
        bool patternFound = findChessboardCorners(chessboardImgGray, Size(8,5), imgCorners, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);

        if(patternFound)
        {
            cornerSubPix(chessboardImgGray, imgCorners, Size(11,11), Size(-1,-1), TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1));
            drawChessboardCorners(chessboardImg, Size(8,5), imgCorners, patternFound);
            corners.push_back(imgCorners);    // keep image and world points index-aligned
            worldCoordinates.push_back(objp);
        }
        //***** Check loaded images and detected chessboard *****
        //imshow("source", chessboardImgGray);
        //imshow("chess", chessboardImg);
        //waitKey(0);
        //*******************************************************
    }
    double reprojectionError = calibrateCamera(worldCoordinates, corners, Size(1280,960), cameraMatrixHandEye, distortionHandEye, rvecs, tvecs, CALIB_FIX_ASPECT_RATIO + CALIB_FIX_K3 +
                                               CALIB_ZERO_TANGENT_DIST + CALIB_FIX_PRINCIPAL_POINT);

    //***** Check camera calibration results *****
    //cout << "Reprojection Error CHE: " << endl << reprojectionError << endl << endl;
    //cout << "Camera Matrix CHE: " << endl << cameraMatrixHandEye << endl << endl;
    //cout << "Distortion CHE: " << endl << distortionHandEye << endl << endl;
    //for(size_t i = 0; i < rvecs.size(); i++)
    //{
    //    cout << "No. " << i+1 << " Target translation: " << endl << tvecs[i] << endl << endl;
    //    cout << "No. " << i+1 << " Target rotation: " << endl << rvecs[i] << endl << endl;
    //}
    //********************************************/

    for(size_t i = 0; i < rvecs.size(); i++)
    {
        t_target2cam.emplace_back(tvecs[i]);
        R_target2cam.emplace_back(rvecs[i]);
    }

    //    for(int i = 0; i < t_target2cam.size(); i++)
    //    {
    //        cout << t_target2cam[i] << endl << endl;
    //    }

    //    for(int i = 0; i < R_target2cam.size(); i++)
    //    {
    //        cout << R_target2cam[i] << endl << endl;
    //    }

    Mat R_cam2gripper, t_cam2gripper; // allocated by calibrateHandEye
    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper);

    cout << t_cam2gripper << endl << endl;
    cout << R_cam2gripper << endl << endl;

    // Assemble the homogeneous 4x4 cam->gripper transform from R and t
    Mat transformationMat4x4 = Mat::eye(4, 4, CV_64F);
    R_cam2gripper.copyTo(transformationMat4x4(Rect(0, 0, 3, 3)));
    t_cam2gripper.copyTo(transformationMat4x4(Rect(3, 0, 1, 3)));

    // Invert to get the gripper->cam transform and split it back into R and t
    Mat transformationMatInv4x4 = transformationMat4x4.inv();
    Mat R_cam2gripperInv = transformationMatInv4x4(Rect(0, 0, 3, 3)).clone();
    Mat t_cam2gripperInv = transformationMatInv4x4(Rect(3, 0, 1, 3)).clone();

    cout << transformationMatInv4x4 << endl << endl;
    cout << t_cam2gripperInv << endl << endl;


    vector<Point3f> objectPoints;
    objectPoints.push_back(Point3f(0, 0, 0));   // TCP in TCP coordinates

    vector<Point2f> imagePoints;
    projectPoints(objectPoints, R_cam2gripperInv, t_cam2gripperInv, defaultCameraMatrix, defaultDistortion, imagePoints);
    cout << imagePoints[0] << endl << endl;

    return a.exec();
}
c++ opencv camera-calibration robotics
2 Answers

You need to use solvePnP to get the rvec and tvec for each image separately, which gives you one list of rvecs and one of tvecs whose length equals the number of images. To get matching lists of rvecs and tvecs for the gripper-to-base transformation, you need to derive the R and t matrices from the robot's kinematics, which take the joint angles as input. Then, for each pose, feed the joint-angle data into those R, t matrices to obtain an rvec and tvec per pose, building lists of the same length. Finally, feed all of them into the calibrateHandEye function.
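
A rough sketch of the per-image solvePnP step described above (assuming the objp, corners, and intrinsics variables from the question's code; these poses replace the rvecs/tvecs that calibrateCamera returned):

vector<Mat> R_target2cam, t_target2cam;
for (size_t i = 0; i < corners.size(); i++)
{
    Mat rvec, tvec;
    // Pose of the calibration target in the camera frame for image i
    bool ok = solvePnP(objp, corners[i], defaultCameraMatrix,
                       defaultDistortion, rvec, tvec);
    if (!ok)
        continue;                      // skip images without a valid pose
    R_target2cam.push_back(rvec);      // calibrateHandEye accepts 3x1 rvecs
    t_target2cam.push_back(tvec);
}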



I was finally able to solve this. The calibration itself was correct, but my approach contained two errors:

  • To find my TCP, I used the TCP coordinates from the robot controller. Instead, I had to use (0, 0, 0), which is the TCP expressed in its own TCP coordinate system.
  • The second error was using the transformation matrix built directly from the R and t returned by calibrateHandEye. Instead, I had to use the inverse of that transformation matrix, as the sketch below shows.
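
Condensed, the corrected projection looks roughly like this (a sketch reusing the variable names from the question; the essential change is inverting the cam-to-gripper transform before projecting the TCP origin):

// Build the 4x4 cam->gripper transform from the calibrateHandEye output,
// invert it to get gripper->cam, then project the TCP origin (0,0,0).
Mat T = Mat::eye(4, 4, CV_64F);
R_cam2gripper.copyTo(T(Rect(0, 0, 3, 3)));
t_cam2gripper.copyTo(T(Rect(3, 0, 1, 3)));
Mat Tinv = T.inv();

Mat rvecInv;
Rodrigues(Tinv(Rect(0, 0, 3, 3)).clone(), rvecInv); // 3x3 -> rotation vector

vector<Point3f> tcp{ Point3f(0, 0, 0) };            // TCP in its own frame
vector<Point2f> px;
projectPoints(tcp, rvecInv, Tinv(Rect(3, 0, 1, 3)).clone(),
              defaultCameraMatrix, defaultDistortion, px);
cout << "TCP in image: " << px[0] << endl;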