We captured a 3D image using the Kinect with the OpenNI library and got the RGB and depth images as OpenCV Mats using this code:
#include <OpenNI.h>
#include <opencv2/opencv.hpp>
#include <cstdio>

using namespace openni;

int main()
{
    OpenNI::initialize();
    puts( "Kinect initialization..." );
    Device device;
    if ( device.open( openni::ANY_DEVICE ) != 0 )
    {
        puts( "Kinect not found!" );
        return -1;
    }
    puts( "Kinect opened" );
    VideoStream depth, color;
    color.create( device, SENSOR_COLOR );
    color.start();
    puts( "Camera ok" );
    depth.create( device, SENSOR_DEPTH );
    depth.start();
    puts( "Depth sensor ok" );
    VideoMode paramvideo;
    paramvideo.setResolution( 640, 480 );
    paramvideo.setFps( 30 );
    paramvideo.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );
    depth.setVideoMode( paramvideo );
    paramvideo.setPixelFormat( PIXEL_FORMAT_RGB888 );
    color.setVideoMode( paramvideo );
    puts( "Video stream settings ok" );

    // If the depth/color synchronisation is not necessary, start is faster:
    //device.setDepthColorSyncEnabled( false );
    // Otherwise, the streams can be synchronized with a reception in the order of our choice:
    device.setDepthColorSyncEnabled( true );
    device.setImageRegistrationMode( openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    VideoStream** stream = new VideoStream*[2];
    stream[0] = &depth;
    stream[1] = &color;
    puts( "Kinect initialization completed" );

    if ( device.getSensorInfo( SENSOR_DEPTH ) != NULL )
    {
        VideoFrameRef depthFrame, colorFrame;
        // These Mats simply wrap the frame buffers returned by OpenNI; no copy is made.
        cv::Mat colorcv( cv::Size( 640, 480 ), CV_8UC3, NULL );
        cv::Mat depthcv( cv::Size( 640, 480 ), CV_16UC1, NULL );
        cv::namedWindow( "RGB", CV_WINDOW_AUTOSIZE );
        cv::namedWindow( "Depth", CV_WINDOW_AUTOSIZE );
        int changedIndex;
        while ( device.isValid() )
        {
            OpenNI::waitForAnyStream( stream, 2, &changedIndex );
            switch ( changedIndex )
            {
                case 0:
                    depth.readFrame( &depthFrame );
                    if ( depthFrame.isValid() )
                    {
                        depthcv.data = (uchar*) depthFrame.getData();
                        cv::imshow( "Depth", depthcv );
                    }
                    break;
                case 1:
                    color.readFrame( &colorFrame );
                    if ( colorFrame.isValid() )
                    {
                        colorcv.data = (uchar*) colorFrame.getData();
                        // OpenNI delivers RGB; OpenCV's imshow expects BGR
                        cv::cvtColor( colorcv, colorcv, CV_RGB2BGR );
                        cv::imshow( "RGB", colorcv );
                    }
                    break;
                default:
                    puts( "Error retrieving a stream" );
            }
            cv::waitKey( 1 );
        }
        cv::destroyWindow( "RGB" );
        cv::destroyWindow( "Depth" );
    }
    delete[] stream;
    depth.stop();
    depth.destroy();
    color.stop();
    color.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}
We added some code to the above, got the RGB and depth Mats from it, and then processed the RGB image using OpenCV.
Now we need to display that image in 3D.
We are using :-
1) Windows 8 x64
2) Visual Studio 2012 x64
3) OpenCV 2.4.10
4) OpenNI 2.2.0.33
5) Kinect1
6) Kinect SDK 1.8.0
Questions :-
1) Can we display this image in 3D directly using OpenCV, or do we need an external library?
2) If we need an external library, which one is better suited to this simple task: OpenGL, PCL, or something else?
3) Does PCL support Visual Studio 2012 and OpenNI 2? And since PCL ships with a different version of OpenNI, do the two versions conflict?
To improve on antarctician's answer: to display the image in 3D you need to create your point cloud first... The RGB and depth images give you the necessary data to create an organized, colored point cloud. To do so, you need to calculate the x, y, z values for each point. The z value comes from the depth pixel, but x and y must be calculated.
To do it, you can do something like this:
void Viewer::get_pcl( cv::Mat& color_mat, cv::Mat& depth_mat, pcl::PointCloud<pcl::PointXYZRGBA>& cloud )
{
    float x, y, z;
    for ( int j = 0; j < depth_mat.rows; j++ )
    {
        for ( int i = 0; i < depth_mat.cols; i++ )
        {
            // the RGB data is created
            PCD_BGRA pcd_BGRA;
            pcd_BGRA.B = color_mat.at<cv::Vec3b>(j, i)[0];
            pcd_BGRA.R = color_mat.at<cv::Vec3b>(j, i)[2];
            pcd_BGRA.G = color_mat.at<cv::Vec3b>(j, i)[1];
            pcd_BGRA.A = 0;

            pcl::PointXYZRGBA vertex;
            int depth_value = (int) depth_mat.at<unsigned short>(j, i);
            // find the world coordinates
            // (depth is assumed to be the openni::VideoStream member of the Viewer class)
            openni::CoordinateConverter::convertDepthToWorld( depth, i, j,
                (openni::DepthPixel) depth_mat.at<unsigned short>(j, i), &x, &y, &z );

            // the point is created with depth and color data
            // (the limit* variables and bad_point are assumed class members that bound the valid volume)
            if ( limitx_min <= i && limitx_max >= i && limity_min <= j && limity_max >= j
                 && depth_value != 0 && depth_value <= limitz_max && depth_value >= limitz_min )
            {
                vertex.x = (float) x;
                vertex.y = (float) y;
                vertex.z = (float) depth_value;
            }
            else
            {
                // if the data is outside the boundaries
                vertex.x = bad_point;
                vertex.y = bad_point;
                vertex.z = bad_point;
            }
            vertex.rgb = pcd_BGRA.RGB_float;

            // the point is pushed back in the cloud
            cloud.points.push_back( vertex );
        }
    }
}
and PCD_BGRA is a helper union that packs the B, G, R, A bytes into the single float that PCL stores the color in:
union PCD_BGRA
{
    struct
    {
        uchar B;  // LSB
        uchar G;
        uchar R;  // MSB
        uchar A;
    };
    float RGB_float;
    uint  RGB_uint;
};
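Once the cloud is filled, you can hand it to one of PCL's viewers to get the 3D display you are asking about. The following is only a minimal sketch added for illustration (it is not part of the original answer): it assumes you have a cloud built by something like get_pcl above, held in a shared pointer, and it uses pcl::visualization::CloudViewer, which ships with PCL.

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>

// Hypothetical display helper: shows an already-filled PointXYZRGBA cloud.
void show_cloud( pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_ptr )
{
    // If the cloud was filled row by row (as in get_pcl above), mark it as
    // organized so PCL can use the 2D structure; width * height must equal
    // the number of points that were pushed back.
    cloud_ptr->width    = 640;   // depth_mat.cols
    cloud_ptr->height   = 480;   // depth_mat.rows
    cloud_ptr->is_dense = false; // it may contain bad_point (NaN) entries

    pcl::visualization::CloudViewer viewer( "Kinect cloud" );
    viewer.showCloud( cloud_ptr );
    while ( !viewer.wasStopped() )
    {
        // the viewer runs its own rendering loop
    }
}

CloudViewer is the quickest way to get a window on screen; pcl::visualization::PCLVisualizer gives finer control (camera, several clouds, overlays) if you need it later.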
Of course, this is for the case where you want to use PCL, but the calculation of the x, y, z values is more or less the same either way. This relies on openni::CoordinateConverter::convertDepthToWorld to find the position of the point in 3D. You may also do it manually:
const float invfocalLength = 1.f / 525.f;
const float centerX = 319.5f;
const float centerY = 239.5f;
const float factor = 1.f / 1000.f;
float dist = factor * (float)(*depthdata);
p.x = (x-centerX) * dist * invfocalLength;
p.y = (y-centerY) * dist * invfocalLength;
p.z = dist;
Here centerX, centerY, and the focal length (525.f, used through invfocalLength) are the intrinsic calibration parameters of the camera (the values above are for the Kinect), and factor determines whether you get the distance in meters or millimeters; this value depends on your program.
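For completeness, here is how that fragment could be wrapped into a self-contained routine that converts a whole 16-bit depth Mat into an (uncolored) cloud. This is only a sketch under the same assumptions: depth_to_cloud is a made-up name, the intrinsics are the common Kinect defaults, and the factor has to match the depth units your stream actually delivers.

#include <limits>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

// Hypothetical helper applying the pinhole back-projection above to every pixel
// of a CV_16UC1 depth Mat. Replace the intrinsics with your own calibration.
pcl::PointCloud<pcl::PointXYZ>::Ptr depth_to_cloud( const cv::Mat& depth_mat )
{
    const float invFocalLength = 1.f / 525.f;
    const float centerX = 319.5f;
    const float centerY = 239.5f;
    // converts the raw depth units to meters: 1/1000 if the stream delivers
    // millimeters, 1/10000 for PIXEL_FORMAT_DEPTH_100_UM
    const float factor  = 1.f / 1000.f;

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud( new pcl::PointCloud<pcl::PointXYZ> );
    cloud->width    = depth_mat.cols;
    cloud->height   = depth_mat.rows;
    cloud->is_dense = false;
    cloud->points.resize( cloud->width * cloud->height );

    for ( int y = 0; y < depth_mat.rows; ++y )
    {
        for ( int x = 0; x < depth_mat.cols; ++x )
        {
            pcl::PointXYZ& p = cloud->points[y * depth_mat.cols + x];
            unsigned short raw = depth_mat.at<unsigned short>( y, x );
            if ( raw == 0 )
            {
                // no measurement at this pixel
                p.x = p.y = p.z = std::numeric_limits<float>::quiet_NaN();
                continue;
            }
            float dist = factor * (float) raw;
            p.x = ( x - centerX ) * dist * invFocalLength;
            p.y = ( y - centerY ) * dist * invFocalLength;
            p.z = dist;
        }
    }
    return cloud;
}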
For the questions: