首先,我只需要捕获RGBstream并按opencv image的顺序对其进行转换。应该没有那么难,但是我在网上发现了一个以上的代码,但是它们不能在我的计算机上运行。我不知道哪里出了错。
您能给我推荐一个教程还是一个非常简单的代码,使我了解如何使用Kinect库?一开始我尝试了Kinect sdk,过了一会儿选择OPENNI。
帮帮我,谢谢!
ps:我正在使用C++和Visual Studio 2010
AFAIK开箱即用,OpenCV支持OpenNI1.5.x。如果尚未安装OpenNI,请先按此特定顺序安装(重要):
此时您应该已经安装了OpenNI,所以继续并运行其中一个示例。
默认情况下,预构建的opencv库在编译时没有启用OpenNI支持,因此您需要从源代码构建opencv才能启用OpenNI支持。
如果尚未安装CMakeGUI,请安装。这将使您轻松配置opencv构建的过程。运行它,浏览到opencv源文件夹,选择一个目标目录来放置构建文件,然后单击configure。
您应该会看到很多选项。向下滚动时,您应该看到已检测到OpenNI的安装文件夹(如果没有,则应修复该路径),并且还应该勾选 WITH_OPENNI 标志以启用OpenNI支持。
完成后,请按生成,这将生成您需要轻松编译opencv库所需的Visual Studio项目文件。
有关在Windows上从源代码构建opencv的更多详细信息,请查阅official documentation
完成编译后,您应该已经在openni的支持下构建了opencv,并且应该能够运行以下简单的程序:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(){
cout << "opening device(s)" << endl;
VideoCapture sensor1;
sensor1.open(CV_CAP_OPENNI);
if( !sensor1.isOpened() ){
cout << "Can not open capture object 1." << endl;
return -1;
}
for(;;){
Mat depth1;
if( !sensor1.grab() ){
cout << "Sensor1 can not grab images." << endl;
return -1;
}else if( sensor1.retrieve( depth1, CV_CAP_OPENNI_DEPTH_MAP ) ) imshow("depth1",depth1);
if( waitKey( 30 ) == 27 ) break;//ESC to exit
}
}
另请参阅this similar answer。如果您需要使用OpenNI 2.x,请参见以下资源:
[我认为这是在OpenCV中使用kinect的最简单,最有效的方法。
该代码使您可以连续收集深度和彩色图像,但是如果您只想要这些流之一,则可以随意打开另一个流。
这里是C++ OpenCV API和Mat对象的代码:
#include <openni2/OpenNI.h>
#include <opencv2/opencv.hpp>

using namespace openni;

// Continuously receives depth and color frames from a Kinect through
// OpenNI2 and displays them with the OpenCV C++ API (cv::Mat).
// Returns -1 if no device is found, 0 otherwise.
int main() {                // was `main()`: implicit int is not valid C++
    OpenNI::initialize();
    puts( "Kinect initialization..." );
    Device device;
    if ( device.open( openni::ANY_DEVICE ) != 0 ) {
        puts( "Kinect not found !" );
        return -1;
    }
    puts( "Kinect opened" );

    VideoStream depth, color;
    color.create( device, SENSOR_COLOR );
    color.start();
    puts( "Camera ok" );
    depth.create( device, SENSOR_DEPTH );
    depth.start();
    puts( "Depth sensor ok" );

    // 640x480 @ 30 fps; depth in 100 um units, color as packed RGB888.
    VideoMode paramvideo;
    paramvideo.setResolution( 640, 480 );
    paramvideo.setFps( 30 );
    paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
    depth.setVideoMode( paramvideo );
    paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
    color.setVideoMode( paramvideo );
    puts( "Réglages des flux vidéos ok" );

    // If the depth/color synchronisation is not necessary, start is faster :
    device.setDepthColorSyncEnabled( false );
    // Otherwise, the streams can be synchronized with a reception in the order of our choice :
    //device.setDepthColorSyncEnabled( true );
    //device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    // Stack array instead of the original `new VideoStream*[2]`, which leaked.
    VideoStream* stream[2] = { &depth, &color };
    puts( "Kinect initialization completed" );

    if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL ) {
        VideoFrameRef depthFrame, colorFrame;
        cv::namedWindow( "RGB", CV_WINDOW_AUTOSIZE );
        cv::namedWindow( "Depth", CV_WINDOW_AUTOSIZE );

        int changedIndex;
        while ( device.isValid() ) {
            // Block until either stream has a new frame ready.
            OpenNI::waitForAnyStream( stream, 2, &changedIndex );
            switch ( changedIndex ) {
            case 0: // depth stream
                depth.readFrame( &depthFrame );
                if ( depthFrame.isValid() ) {
                    // Wrap the frame buffer without copying; the Mat is only
                    // valid until the next readFrame() on this stream.
                    cv::Mat depthcv( 480, 640, CV_16UC1,
                                     const_cast<void*>( depthFrame.getData() ) );
                    cv::imshow( "Depth", depthcv );
                }
                break;
            case 1: // color stream
                color.readFrame( &colorFrame );
                if ( colorFrame.isValid() ) {
                    cv::Mat colorRGB( 480, 640, CV_8UC3,
                                      const_cast<void*>( colorFrame.getData() ) );
                    // OpenNI delivers RGB, OpenCV display expects BGR.
                    // Convert into a separate Mat: converting in place would
                    // write into OpenNI's read-only frame buffer.
                    cv::Mat colorBGR;
                    cv::cvtColor( colorRGB, colorBGR, CV_RGB2BGR );
                    cv::imshow( "RGB", colorBGR );
                }
                break;
            default:
                puts( "Error retrieving a stream" );
            }
            cv::waitKey( 1 );
        }
        cv::destroyWindow( "RGB" );
        cv::destroyWindow( "Depth" );
    }

    depth.stop();
    depth.destroy();
    color.stop();
    color.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}
对于那些喜欢使用带有IplImage结构的OpenCV的C API的人:
#include <openni2/OpenNI.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

using namespace openni;

// Same capture loop as the C++-API version, but using OpenCV's legacy
// C API (IplImage).  Returns -1 if no device is found, 0 otherwise.
int main() {                // was `main()`: implicit int is not valid C++
    OpenNI::initialize();
    puts( "Kinect initialization..." );
    Device device;
    if ( device.open( openni::ANY_DEVICE ) != 0 ) {
        puts( "Kinect not found !" );
        return -1;
    }
    puts( "Kinect opened" );

    VideoStream depth, color;
    color.create( device, SENSOR_COLOR );
    color.start();
    puts( "Camera ok" );
    depth.create( device, SENSOR_DEPTH );
    depth.start();
    puts( "Depth sensor ok" );

    // 640x480 @ 30 fps; depth in 100 um units, color as packed RGB888.
    VideoMode paramvideo;
    paramvideo.setResolution( 640, 480 );
    paramvideo.setFps( 30 );
    paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
    depth.setVideoMode( paramvideo );
    paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
    color.setVideoMode( paramvideo );
    puts( "Réglages des flux vidéos ok" );

    // If the depth/color synchronisation is not necessary, start is faster :
    device.setDepthColorSyncEnabled( false );
    // Otherwise, the streams can be synchronized with a reception in the order of our choice :
    //device.setDepthColorSyncEnabled( true );
    //device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    // Stack array instead of the original `new VideoStream*[2]`, which leaked.
    VideoStream* stream[2] = { &depth, &color };
    puts( "Kinect initialization completed" );

    if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL ) {
        VideoFrameRef depthFrame, colorFrame;
        // Headers only: the pixel data stays owned by OpenNI's frame buffers.
        IplImage* colorcv = cvCreateImageHeader( cvSize( 640, 480 ), IPL_DEPTH_8U, 3 );
        IplImage* depthcv = cvCreateImageHeader( cvSize( 640, 480 ), IPL_DEPTH_16U, 1 );
        // Separate destination for the RGB->BGR conversion: converting in
        // place would write into OpenNI's read-only frame buffer.
        IplImage* colorbgr = cvCreateImage( cvSize( 640, 480 ), IPL_DEPTH_8U, 3 );
        cvNamedWindow( "RGB", CV_WINDOW_AUTOSIZE );
        cvNamedWindow( "Depth", CV_WINDOW_AUTOSIZE );

        int changedIndex;
        while ( device.isValid() ) {
            // Block until either stream has a new frame ready.
            OpenNI::waitForAnyStream( stream, 2, &changedIndex );
            switch ( changedIndex ) {
            case 0: // depth stream
                depth.readFrame( &depthFrame );
                if ( depthFrame.isValid() ) {
                    // Point the header at the frame data; valid until next readFrame().
                    depthcv->imageData = (char*) depthFrame.getData();
                    cvShowImage( "Depth", depthcv );
                }
                break;
            case 1: // color stream
                color.readFrame( &colorFrame );
                if ( colorFrame.isValid() ) {
                    colorcv->imageData = (char*) colorFrame.getData();
                    // OpenNI delivers RGB, OpenCV display expects BGR.
                    cvCvtColor( colorcv, colorbgr, CV_RGB2BGR );
                    cvShowImage( "RGB", colorbgr );
                }
                break;
            default:
                puts( "Error retrieving a stream" );
            }
            cvWaitKey( 1 );
        }
        cvReleaseImage( &colorbgr );
        cvReleaseImageHeader( &colorcv );
        cvReleaseImageHeader( &depthcv );
        cvDestroyWindow( "RGB" );
        cvDestroyWindow( "Depth" );
    }

    depth.stop();
    depth.destroy();
    color.stop();
    color.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}
我希望它对大多数人有用。
享受!