算法
- 计算第一张图和第二张图的关键点并匹配
- 以第一张图的相机坐标系为世界坐标系,计算第二张图相对第一张图的旋转矩阵、平移向量
- 不断更新第一张图,在进行第二次计算时,以第二张图为第一张图,以第二张图的相机坐标系为世界坐标系
代码
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <chrono>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;
// Detect ORB keypoints in img_1/img_2, compute descriptors, and append the
// Hamming-distance matches that pass the distance filter to `matches`.
void find_feature_matches(
Mat &img_1,Mat &img_2,
std::vector<KeyPoint> &keypoints_1,
std::vector<KeyPoint> &keypoints_2,
std::vector<DMatch> &matches
);
// Estimate the relative pose (rotation R, translation t, up to scale) of
// image 2 w.r.t. image 1 from matched 2D keypoints via the essential matrix.
void pose_estimation_2d2d(
std::vector<KeyPoint> &keypoints_1,
std::vector<KeyPoint> &keypoints_2,
std::vector<DMatch> &matches,
Mat &R, Mat &t
);
// Back-project a pixel coordinate to the normalized camera plane using
// intrinsics K (fx, fy, cx, cy).
Point2d pixel2cam(const Point2d &p,const Mat &K);
// Iterate over consecutive image pairs (newest to oldest), estimating the
// relative pose of each pair and printing t^ * R (which should be ~E up to scale).
int main(){
    cv::String path = "/home/automobile/wcm/database/img_data" ;
    std::vector<cv::String> filenames;
    cv::glob(path, filenames);
    if (filenames.size() == 0){
        cout << "NO image files [png]" << endl;
        // Bug fix: previously fell through to the loop and indexed an empty list.
        return 1;
    }
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    Mat R, t;
    for (int size = (int)filenames.size() - 1; size > 0; size--){
        Mat img_1 = cv::imread(filenames[size]);
        Mat img_2 = cv::imread(filenames[size - 1]);
        assert(img_1.data && img_2.data && "Cannot load images");
        // Bug fix: these vectors are reused across iterations; without clearing,
        // `matches` accumulates stale entries whose indices refer to previous
        // frames' keypoints, corrupting every pose estimate after the first.
        keypoints_1.clear();
        keypoints_2.clear();
        matches.clear();
        find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
        pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t);
        cout << "位姿估计 i= " << size << endl;
        // Skew-symmetric matrix t^ of the translation, so t^ * R ~ essential matrix.
        Mat t_x=(
            Mat_<double> (3,3) << 0, -t.at<double>(2, 0), t.at<double>(1, 0),
            t.at<double>(2,0), 0, -t.at<double>(0,0),
            -t.at<double>(1,0), t.at<double>(0,0), 0
        );
        cout << "对第 " << size << " t^R= " << t_x * R << endl;
    }
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> poseEstimation_2d2d = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "estimate 100 pose cost = " << poseEstimation_2d2d.count() << " seconds." <<endl;
    return 0;
}
// Convert a pixel coordinate into normalized camera-plane coordinates:
// x = (u - cx) / fx, y = (v - cy) / fy, read from the intrinsic matrix K.
Point2d pixel2cam(const Point2d &p, const Mat &K) {
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}
// Detect ORB keypoints in both images, compute descriptors, match them with a
// brute-force Hamming matcher, and keep only matches whose distance is at most
// max(2 * min_dist, 30).
// Outputs: keypoints_1/keypoints_2 (overwritten) and matches (rewritten).
void find_feature_matches(Mat &img_1, Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches
){
    // Bug fix: the caller reuses `matches` across frames; without clearing,
    // stale matches (indexing previous frames' keypoints) accumulate.
    matches.clear();
    Mat descriptors_1, descriptors_2;
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);
    descriptor->compute(img_1, keypoints_1, descriptors_1);
    descriptor->compute(img_2, keypoints_2, descriptors_2);
    // Guard: match() asserts on empty descriptor matrices (featureless image).
    if (descriptors_1.empty() || descriptors_2.empty()) return;
    // Removed dead statement `BFMatcher(NORM_HAMMING);` — it constructed an
    // unused temporary; the actual matching uses `matcher` above.
    vector<DMatch> match;
    matcher->match(descriptors_1, descriptors_2, match);
    double min_dist = 10000, max_dist = 0;
    for (int num = 0; num < descriptors_1.rows; num++){
        double dist = match[num].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf(" --Max dist : %f \n", max_dist);
    printf(" --Min dist : %f \n", min_dist);
    // Empirical filter: reject matches more than twice the best distance,
    // with a floor of 30 because min_dist can be very small.
    for (int num1 = 0; num1 < descriptors_1.rows; num1++){
        if (match[num1].distance <= max(2 * min_dist, 30.0)){
            matches.push_back(match[num1]);
        }
    }
}
void pose_estimation_2d2d(
std::vector<KeyPoint> &keypoints_1,
std::vector<KeyPoint> &keypoints_2,
std::vector<DMatch> &matches,
Mat &R, Mat &t
){
cout << "进入pose-estimation" << endl;
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<Point2f> points1;
vector<Point2f> points2;
for (int i = 0; i < (int) matches.size(); i++){
points1.push_back(keypoints_1[matches[i].queryIdx].pt);
points2.push_back(keypoints_2[matches[i].queryIdx].pt);
}
Mat fundamental_matrix;
fundamental_matrix = findFundamentalMat(points1, points2, CV_FM_RANSAC_ONLY);
cout << " fundamental_matrix is " << endl << fundamental_matrix <<endl;
Point2d principal_point(325.1, 249.7);
double focal_length = 521;
Mat essential_matrix;
essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);
cout << "essential_Matrix is " << endl <<essential_matrix <<endl;
Mat homography_matrix;
homography_matrix = findHomography(points1, points2, RANSAC, 3);
cout << "homography_matrix is " << endl << homography_matrix << endl;
recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
cout << "R is " << endl << R << endl;
cout << "t is " << endl << t <<endl;
}
运行结果
homography_matrix is
[-0.6227391139971105, -2.886970200092988, 364.1872697938189;
-0.2148405971333806, -0.972686679720263, 126.9249350232471;
-0.001631390801007104, -0.007687557320241691, 0.9999999999999999]
R is
[0.3194695470556352, 0.4729054474713082, -0.8211575039272485;
0.4490655530003259, -0.8386377196567202, -0.3082643415601096;
-0.8344335429574837, -0.2702724790298453, -0.4802847587280729]
t is
[0.2296954847355427;
-0.1516112670155564;
0.9613813020888443]
位姿估计 i= 2
对第 2 t^R= [-0.3052136993786365, 0.8472269758895471, 0.3691761548756424;
0.4987976662552955, 0.5167228229363109, -0.6791262298784655;
0.1515835126764352, -0.1209335034643769, -0.1953036569510742]
--Max dist : 85.000000
--Min dist : 5.000000
进入pose-estimation
fundamental_matrix is
[-6.255652045940543e-06, 1.679691660890056e-05, -0.0005118221938695405;
-5.358345603116775e-05, 0.0001784510571767766, -0.005452954314486946;
0.01061051013232514, -0.03275577826825726, 1]
essential_Matrix is
[0.004266503965144853, -0.6636712990779564, -0.1952643925552019;
0.6959001651454869, -0.03315309018943863, 0.04331212815465878;
0.1142026543150515, 0.1367396083384311, 0.04900965516181151]
homography_matrix is
[-1.133399786845522, 0.2333755152279524, 382.6506100442739;
-0.3823145696849643, 0.07505512543265744, 129.6483054540828;
-0.002966733373018169, 0.0006222148878302129, 1]
R is
[0.9606504274809822, -0.072697821483587, 0.2680779418977755;
0.03923589585057277, 0.9909803233208437, 0.1281348635919174;
-0.2749750909762716, -0.1125745332766574, 0.9548380353757032]
t is
[0.2068527047159039;
-0.159673994852563;
0.9652544607094746]
位姿估计 i= 1
对第 1 t^R= [0.006033747771430509, -0.9385729521138152, -0.2761455522001097;
0.9841514516064251, -0.04688554978048633, 0.06125259905155897;
0.1615069425913533, 0.1933790086257947, 0.0693101190170626]
estimate 100 pose cost = 57.3466 seconds.
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)