#include <Eigen/Eigen>
#include <iostream>
#include <opencv2/opencv.hpp>
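
// Map a set of 3-D points with the pose returned by cv::calibrateCamera: convert the
// Rodrigues rotation vector to a 3x3 matrix and apply p' = R * p + t, i.e. transform
// points from the pattern coordinate frame into the camera frame.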
void transform(const std::vector<cv::Point3f> &src, std::vector<cv::Point3f> &dst,
               const cv::Mat &rvec, const cv::Mat &tvec) {
    cv::Mat r;
    cv::Rodrigues(rvec, r);

    dst.clear();
    for (auto &pt : src) {
        cv::Mat point(3, 1, CV_64FC1);
        point.at<double>(0, 0) = pt.x;
        point.at<double>(1, 0) = pt.y;
        point.at<double>(2, 0) = pt.z;

        cv::Mat rt = r * point + tvec;

        dst.emplace_back(rt.at<double>(0, 0), rt.at<double>(1, 0), rt.at<double>(2, 0));
    }
}

int main() {
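    // Load the calibration shot in grayscale. IMG_DIR is assumed to be supplied by the
    // build system as a compile-time macro pointing at the image directory.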
    auto src = cv::imread(std::string(IMG_DIR) + "/src.bmp", cv::IMREAD_GRAYSCALE);
    if (src.empty()) {
        return -1;
    }
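
    // Chessboard geometry: a 22 x 16 grid of inner corners, spaced gridSize apart (arbitrary units).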
    int width = 22;
    int height = 16;
    float gridSize = 1.f;
    cv::Size patternSize(width, height);
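
    // Object points of the board in its own (planar) coordinate system, z = 0.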
    std::vector<cv::Point3f> obj;
    obj.reserve(static_cast<std::size_t>(width * height));
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            obj.emplace_back(static_cast<float>(x) * gridSize, static_cast<float>(y) * gridSize,
                             0.f);
        }
    }
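
    // Detect the inner corners with the sector-based detector; findChessboardCornersSB is
    // generally more accurate than findChessboardCorners and needs no cornerSubPix pass.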
    std::vector<cv::Point2f> corners;
    auto found = cv::findChessboardCornersSB(src, patternSize, corners);
    if (!found) {
        return -1;
    }

    cv::Mat color;
    cv::cvtColor(src, color, cv::COLOR_GRAY2RGB);
    cv::drawChessboardCorners(color, patternSize, corners, found);
    // cv::imshow("pattern", color);
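
    // Calibrate from this single view; rVecs[0]/tVecs[0] describe the board pose in the
    // camera frame. With only one pose the intrinsics are weakly constrained, so the
    // numbers below are best treated as a rough estimate.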
    cv::Mat cameraMatrix;
    cv::Mat distCoeffs;
    std::vector<cv::Mat> tVecs;
    std::vector<cv::Mat> rVecs;

    auto error = cv::calibrateCamera(std::vector<std::vector<cv::Point3f>>{obj},
                                     std::vector<std::vector<cv::Point2f>>{corners}, src.size(),
                                     cameraMatrix, distCoeffs, rVecs, tVecs);
    std::cout << "RMS re-projection error: " << error << std::endl;

    std::cout << "camera matrix:\n"
              << cameraMatrix << std::endl
              << "dist coeff:\n"
              << distCoeffs << std::endl
              << "translate:\n"
              << tVecs[ 0 ] << std::endl
              << "rotation:\n"
              << rVecs[ 0 ] << std::endl;
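
    // Undistort the full image and visualise the recovered board pose as a coordinate axis.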
    cv::Mat undistortImg;
    cv::undistort(src, undistortImg, cameraMatrix, distCoeffs);
    cv::imshow("dst", undistortImg);

    cv::drawFrameAxes(color, cameraMatrix, distCoeffs, rVecs[ 0 ], tVecs[ 0 ], 10);
    cv::imshow("coord", color);
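
    // Undistort the detected corners (cv::undistortImagePoints is only available in newer
    // OpenCV 4.x releases) and fit a homography mapping undistorted pixels onto the board plane.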
    std::vector<cv::Point2f> pts;
    cv::undistortImagePoints(corners, pts, cameraMatrix, distCoeffs);

    std::vector<cv::Point2f> obj2;
    obj2.reserve(static_cast<std::size_t>(width * height));
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            obj2.emplace_back(static_cast<float>(x) * gridSize, static_cast<float>(y) * gridSize);
        }
    }
    auto hom = cv::findHomography(pts, obj2, cv::RANSAC);

    std::cout << "hom:\n" << hom << std::endl;

    // Image corners (plus the first undistorted corner, pts[0]) mapped from image
    // coordinates into pattern-plane coordinates via the homography.
    std::vector<cv::Point2f> origins{{0.f, 0.f},
                                     {(float)src.cols - 1.f, 0.f},
                                     {(float)src.cols - 1.f, (float)src.rows - 1.f},
                                     {0.f, (float)src.rows - 1.f},
                                     pts[ 0 ]};
    std::vector<cv::Point2f> perf;
    cv::perspectiveTransform(origins, perf, hom);

    // Lift the pattern-plane points back to 3-D (z = 0) and move them into the camera frame;
    // the last two entries are the board origin and a unit step along the board x-axis.
    std::vector<cv::Point3f> origins2{
        {perf[ 0 ].x, perf[ 0 ].y, 0.f},
        {perf[ 1 ].x, perf[ 1 ].y, 0.f},
        {perf[ 2 ].x, perf[ 2 ].y, 0.f},
        {perf[ 3 ].x, perf[ 3 ].y, 0.f},
        {0.f, 0.f, 0.f},
        {1.f, 0.f, 0.f},
    };
    std::vector<cv::Point3f> trans;
    transform(origins2, trans, rVecs[ 0 ], tVecs[ 0 ]);
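
    // First rotation: span the board plane with two edge vectors, take their cross product
    // as the plane normal and rotate that normal onto the camera z-axis (0, 0, 1).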
    auto offset0 = trans[ 1 ] - trans[ 0 ];
    Eigen::Vector3f v0(offset0.x, offset0.y, offset0.z);

    auto offset1 = trans[ 3 ] - trans[ 0 ];
    Eigen::Vector3f v1(offset1.x, offset1.y, offset1.z);

    Eigen::Vector3f v2 = v0.cross(v1).normalized();

    Eigen::Vector3f v3(0.f, 0.f, 1.f);

    auto rot = Eigen::Quaternionf::FromTwoVectors(v2, v3);
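
    // Rotate every point about trans[0] so the board plane becomes parallel to the image plane.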
    std::vector<cv::Point3f> rotated;
    Eigen::Vector3f p0(trans[ 0 ].x, trans[ 0 ].y, trans[ 0 ].z);
    for (auto &pt : trans) {
        Eigen::Vector3f point(pt.x, pt.y, pt.z);
        Eigen::Vector3f offset = point - p0;
        Eigen::Vector3f rotPt = rot * offset;
        Eigen::Vector3f p = rotPt + p0;

        rotated.emplace_back(p.x(), p.y(), p.z());
    }
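
    // Second rotation: align the in-plane board x direction (entries 4 and 5, the board
    // origin and unit-x point) with a fixed image-plane direction so the rectified view
    // ends up axis-aligned as well.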
    auto v00 = rotated[ 5 ] - rotated[ 4 ];
    auto v01 = Eigen::Vector3f(v00.x, v00.y, v00.z);
    auto v02 = Eigen::Vector3f(-1.f, 0.f, 0.f);
    auto rot2 = Eigen::Quaternionf::FromTwoVectors(v01, v02);

    std::vector<cv::Point3f> rotated2;
    Eigen::Vector3f p00(rotated[ 0 ].x, rotated[ 0 ].y, rotated[ 0 ].z);
    for (auto &pt : rotated) {
        Eigen::Vector3f point(pt.x, pt.y, pt.z);
        Eigen::Vector3f offset = point - p00;
        Eigen::Vector3f rotPt = rot2 * offset;
        Eigen::Vector3f p = rotPt + p00;

        rotated2.emplace_back(p.x(), p.y(), p.z());
    }
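
    // Project the four rectified corner points back into the image with the pinhole model:
    // p = K * X, then divide by the third (depth) component.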
    std::vector<cv::Point2f> projected;
    for (std::size_t i = 0; i < 4; i++) {
        auto &pt = rotated2[ i ];
        cv::Mat point(3, 1, CV_64FC1);
        point.at<double>(0, 0) = pt.x;
        point.at<double>(1, 0) = pt.y;
        point.at<double>(2, 0) = pt.z;

        cv::Mat proj = cameraMatrix * point;
        cv::Mat proj2 = proj / proj.at<double>(2, 0);
        projected.emplace_back(proj2.at<double>(0, 0), proj2.at<double>(1, 0));
    }
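
    // Build the fronto-parallel warp from the four original-corner / reprojected-corner
    // pairs and apply it to the undistorted image.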
    std::vector<cv::Vec2f> org;
    std::vector<cv::Vec2f> proj;
    for (std::size_t i = 0; i < 4; i++) {
        auto &o = origins[ i ];
        auto &p = projected[ i ];

        org.emplace_back(o.x, o.y);
        proj.emplace_back(p.x, p.y);
    }
    auto perspective = cv::getPerspectiveTransform(org, proj);

    cv::Mat warpImg;
    cv::warpPerspective(undistortImg, warpImg, perspective, src.size(), cv::INTER_LINEAR);
    cv::imshow("warp", warpImg);

    cv::waitKey();
    return 0;
}