The goal of the following code is to project the corner vertices of a cube, plus the center point, onto a camera image plane. However, the image that is created contains a copy of the background screen and does not display the 9 cube points, or at least I don't see any of them. Once this code is working, what I'm trying to understand is the influence of the rotation and translation vector arguments on the view of the cube; what I would like to do is rotate the camera around the cube while keeping it looking at the cube.

P.S. Any information on the mathematics of projecting 3D points onto a 2D camera image plane would be helpful too.

P.P.S. It would also be helpful to number the vertices in order to make the image clearer.
// baseline: https://www.programmersought.com/article/6279272931/
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/viz/types.hpp"
#include <iostream>
#include <string>
#include <cmath>
#include <vector>
using namespace std;
vector<cv::Point3f> Generate3DPoints();
int main(int argc, char* argv[])
{
// Generate 3D points
vector<cv::Point3f> objectPoints = Generate3DPoints();
vector<cv::Point2f> imagePoints;
// Camera settings
cv::Mat intrisicMat(3, 3, cv::DataType<float>::type); // Intrinsic matrix
intrisicMat.at<float>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<float>(1, 0) = 0;
intrisicMat.at<float>(2, 0) = 0;
intrisicMat.at<float>(0, 1) = 0;
intrisicMat.at<float>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<float>(2, 1) = 0;
intrisicMat.at<float>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<float>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<float>(2, 2) = 1;
// Rotation vector
cv::Mat rVec(3, 1, cv::DataType<float>::type);
rVec.at<float>(0) = -3.9277902400761393e-002;
rVec.at<float>(1) = 3.7803824407602084e-002;
rVec.at<float>(2) = 2.6445674487856268e-002;
// Translation vector
cv::Mat tVec(3, 1, cv::DataType<float>::type);
tVec.at<float>(0) = 2.1158489381208221e+000;
tVec.at<float>(1) = -7.6847683212704716e+000;
tVec.at<float>(2) = 2.6169795190294256e+001;
// Distortion vector
cv::Mat distCoeffs(5, 1, cv::DataType<float>::type);
distCoeffs.at<float>(0) = -7.9134632415085826e-001;
distCoeffs.at<float>(1) = 1.5623584435644169e+000;
distCoeffs.at<float>(2) = -3.3916502741726508e-002;
distCoeffs.at<float>(3) = -1.3921577146136694e-002;
distCoeffs.at<float>(4) = 1.1430734623697941e-002;
cout << "Intrinsic matrix: " << intrisicMat << endl << endl;
cout << "Rotation vector: " << rVec << endl << endl;
cout << "Translation vector: " << tVec << endl << endl;
cout << "Distortion coef: " << distCoeffs << endl << endl;
// Generate the points as viewed from the camera
std::vector<cv::Point2f> projectedPoints;
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
// Display the points in an image
cv::Mat image(480, 640, CV_8UC3);
const uint black_r(0), black_g(0), black_b(0);
const uint silver_r(192), silver_g(192), silver_b(192);
// image = cv::Scalar(redVal,greenVal,blueVal);
image = cv::Scalar(black_b, black_g, black_r);
// cv::viz::COLOR blk(cv::viz::Color::black());
cv::Vec3b color(silver_b, silver_g, silver_r);
for (unsigned int i = 0; i < projectedPoints.size(); ++i)
{
cout << "Project point " << objectPoints[i] << " to " << projectedPoints[i];
cv::Point2f pt = projectedPoints[i];
if (0<= (pt.x) && (pt.x) <= image.cols && 0<= (-pt.y) && (-pt.y) <= image.rows )
{
unsigned int ix(std::floor(pt.x)), iy(std::ceil(-pt.y));
cout << ", and set image.at(" << ix << ", " << iy << ") = " << color;
image.at<cv::Vec3b>(ix, iy) = color;
}
cout << endl;
}
cout << "calling imshow" << endl;
cv::namedWindow("Projection", cv::WINDOW_AUTOSIZE);
cv::imshow("Projection", image);
cout << "return from imshow" << endl;
cout << "Press any key to exit.";
cin.ignore();
cin.get();
return 0;
}
vector<cv::Point3f> Generate3DPoints()
{
vector<cv::Point3f> points;
float x, y, z;
// 8 corners of a cube
// +0.5 z face
z = .5;
x = .5; y = .5;
points.push_back(cv::Point3f(x, y, z));
y = -.5;
points.push_back(cv::Point3f(x, y, z));
x = -.5; y = .5;
points.push_back(cv::Point3f(x, y, z));
y = -.5;
points.push_back(cv::Point3f(x, y, z));
// -0.5 z face
z = -.5;
x = .5; y = .5;
points.push_back(cv::Point3f(x, y, z));
y = -.5;
points.push_back(cv::Point3f(x, y, z));
x = -.5; y = .5;
points.push_back(cv::Point3f(x, y, z));
y = -.5;
points.push_back(cv::Point3f(x, y, z));
// mid point
x = 0; y = 0; z = 0;
points.push_back(cv::Point3f(x, y, z));
return points;
}
Your code has several problems:

1. Image size: from intrisicMat you can see that the principal point is (532.63, 380.95), so the image should be roughly twice that size, about 1065x762, but the image you used is only 640x480. In fact, 5 of the 9 points project outside the image area (x > 640).

2. Indexing: the arguments of cv::Mat::at() are (row, col), so image.at<cv::Vec3b>(ix, iy) should be changed to image.at<cv::Vec3b>(iy, ix).

3. cv::waitKey(): you should call cv::waitKey(0); after cv::imshow(), otherwise you won't see the image.

With these problems fixed, you will see the projected 9 points; a corrected sketch follows below.
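In case it helps, here is a self-contained sketch of the program with the three fixes applied. This is my own reconstruction, not your original code: the 1280x800 canvas size and the circle/label drawing (which also numbers the vertices, as asked in the P.P.S.) are choices of mine, and the sign flip on pt.y from your original loop is kept so the points land on the canvas with this particular pose.

// Sketch of the corrected program (not the original author's exact code).
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Intrinsics, distortion and pose copied from the question.
    cv::Matx33f K(1641.5318549788924f, 0.f,                 532.62822453148601f,
                  0.f,                 1706.7753507885654f, 380.95355839052968f,
                  0.f,                 0.f,                 1.f);
    cv::Vec3f rVec(-0.039277902400761393f, 0.037803824407602084f, 0.026445674487856268f);
    cv::Vec3f tVec(2.1158489381208221f, -7.6847683212704716f, 26.169795190294256f);
    cv::Mat distCoeffs = (cv::Mat_<float>(5, 1) << -0.79134632415085826f,
                          1.5623584435644169f, -0.033916502741726508f,
                          -0.013921577146136694f, 0.011430734623697941f);

    // The 8 cube corners plus the centre point, in the same order as the question.
    std::vector<cv::Point3f> objectPoints;
    objectPoints.push_back(cv::Point3f( 0.5f,  0.5f,  0.5f));
    objectPoints.push_back(cv::Point3f( 0.5f, -0.5f,  0.5f));
    objectPoints.push_back(cv::Point3f(-0.5f,  0.5f,  0.5f));
    objectPoints.push_back(cv::Point3f(-0.5f, -0.5f,  0.5f));
    objectPoints.push_back(cv::Point3f( 0.5f,  0.5f, -0.5f));
    objectPoints.push_back(cv::Point3f( 0.5f, -0.5f, -0.5f));
    objectPoints.push_back(cv::Point3f(-0.5f,  0.5f, -0.5f));
    objectPoints.push_back(cv::Point3f(-0.5f, -0.5f, -0.5f));
    objectPoints.push_back(cv::Point3f( 0.0f,  0.0f,  0.0f));   // centre

    std::vector<cv::Point2f> projectedPoints;
    cv::projectPoints(objectPoints, rVec, tVec, K, distCoeffs, projectedPoints);

    // Problem 1: a canvas large enough to contain the principal point (size is a guess).
    cv::Mat image(800, 1280, CV_8UC3, cv::Scalar::all(0));
    const cv::Vec3b silver(192, 192, 192);

    for (size_t i = 0; i < projectedPoints.size(); ++i)
    {
        cv::Point2f pt = projectedPoints[i];
        std::cout << objectPoints[i] << " -> " << pt << std::endl;
        // With this pose the projected y values come out negative (see the note below),
        // so the sign flip from the original code is kept to bring the points onto the canvas.
        int ix = cvRound(pt.x), iy = cvRound(-pt.y);
        if (ix >= 0 && ix < image.cols && iy >= 0 && iy < image.rows)
        {
            image.at<cv::Vec3b>(iy, ix) = silver;  // Problem 2: at() takes (row, col), i.e. (y, x)
            cv::circle(image, cv::Point(ix, iy), 3, cv::Scalar(192, 192, 192));  // a single pixel is hard to spot
            cv::putText(image, std::to_string(i), cv::Point(ix + 5, iy - 5),     // number the vertices (the P.P.S.)
                        cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0));
        }
    }

    cv::imshow("Projection", image);
    cv::waitKey(0);  // Problem 3: without this the window is never repainted
    return 0;
}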
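Regarding the P.S. about the mathematics: cv::projectPoints implements the standard pinhole model. Each object point X is first transformed into the camera frame, X_cam = R*X + tVec, where R is the 3x3 rotation matrix obtained from the rotation vector rVec via cv::Rodrigues. The point is then divided by its depth, x' = X_cam.x / X_cam.z and y' = X_cam.y / X_cam.z, the distortion coefficients are applied to (x', y'), and the pixel coordinates are u = fx*x'' + cx and v = fy*y'' + cy, with fx, fy, cx, cy taken from the camera matrix. With your tVec, for example, y' is about -7.68/26.2 ≈ -0.29, and fy*0.29 ≈ 500 (ignoring distortion) is larger than cy ≈ 381, which, as far as I can tell, is why the raw y coordinates come out negative here and why your code negates pt.y. The rotation and translation vectors therefore describe where the cube sits relative to the camera: to orbit the camera around the cube, keep the object points fixed, pick a camera centre C on a circle around the origin, build a rotation R whose third row (the camera's viewing axis) points from C towards the origin, set t = -R*C, convert R back to a rotation vector with cv::Rodrigues, and pass those to cv::projectPoints.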