#include <iostream>
#include <string>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/Dense>
#include "g2o/types/slam3d/types_slam3d.h"
#include "g2o/types/slam3d/vertex_pointxyz.h"
#include "g2o/types/slam3d/vertex_se3.h"
#include "g2o/types/slam3d/edge_se3.h"
#include "g2o/types/slam3d/edge_se3_pointxyz.h"
#include "g2o/core/factory.h"
#include "g2o/core/optimization_algorithm_factory.h"
#include "g2o/core/sparse_optimizer.h"
#include "g2o/core/block_solver.h"
#include "g2o/solvers/dense/linear_solver_dense.h"
#include "g2o/core/optimization_algorithm_levenberg.h"
#include "icp_g2o.hpp"
using namespace std;
using namespace cv;
using namespace Eigen;
using namespace g2o;
// Uses still images as input.
// Input: the ArUco tags detected in the current frame.
// Maintains a landmark/node map that is fed into g2o for optimization.
// All ArUco tags observed in a single camera frame.
class ArucoFrame
{
public:
    vector<int> landmark_id;                     // ids of the tags detected in this frame
    vector<Eigen::Vector3d> landmarks_pointXYZ;  // 4 corner points per tag, in the camera frame
    Eigen::Isometry3d robot_pose;                // camera pose in the map frame
};
// Global map of all ArUco tags seen so far.
class ArucoMap
{
public:
    vector<int> landmark_id;                     // ids of all tags in the map
    vector<Eigen::Vector3d> landmarks_pointXYZ;  // 4 corner points per tag, in the map frame
};
ArucoMap ArucoMapGlobal;              // the global tag map (map frame)
vector<ArucoFrame> ArucoFrameGlobal;  // history of all processed frames
// Robot pose vertices start at id 0 (the first robot pose defines the map
// origin); tag corner vertices start at id 100. This assumes fewer than 100
// robot poses, so the two id ranges never collide.
int id_vertex_pose = 0, id_vertex_pointXYZ_start = 100;
// Fuses the tags detected in the current frame into the global tag map and the
// g2o graph.
//   ids/corners/rvecs/tvecs : per-marker detection results from cv::aruco
//                             (corners is currently unused; the 3D corner
//                             positions are recomputed from rvec/tvec).
//   optimizer               : the global graph. One SE3 pose vertex is added
//                             per frame; tag corners become PointXYZ vertices
//                             whose ids are derived from the tag id, and each
//                             observation becomes an EdgeSE3PointXYZ.
// NOTE(review): fixes two defects of the original code —
//   (1) the first-frame branch created one pose vertex per tag instead of one
//       per frame;
//   (2) edge measurements in the first-frame branch always used corner 0
//       instead of corner j.
void buildMap(vector< int > &ids, vector< vector< Point2f > > &corners, vector< Vec3d > &rvecs, vector< Vec3d > &tvecs, g2o::SparseOptimizer &optimizer)
{
    // Collect the tags seen in the current frame (corner positions in the
    // camera frame).
    if(ids.empty())
        return;
    ArucoFrame frame;
    frame.robot_pose = Eigen::Isometry3d::Identity();
    for(int i = 0; i < (int)ids.size(); i++)
    {
        frame.landmark_id.push_back(ids[i]);
        // Convert the OpenCV rvec/tvec into an Eigen rotation + translation.
        Mat rot_mat;
        Rodrigues(rvecs[i], rot_mat);
        Eigen::Matrix3d eigen_r;
        Eigen::Vector3d eigen_t;
        cv2eigen(rot_mat, eigen_r);
        cv2eigen(tvecs[i], eigen_t);
        // (eigen_r, eigen_t) maps marker coordinates into the camera frame:
        //   Point_camera = T_camera<-marker * Point_marker
        // IMPORTANT: the four corners are pushed in marker-frame order, and
        // the 0.03 half-side assumes a 6 cm marker (see the -l option).
        frame.landmarks_pointXYZ.push_back(eigen_r * Eigen::Vector3d(-0.03,  0.03, 0) + eigen_t);
        frame.landmarks_pointXYZ.push_back(eigen_r * Eigen::Vector3d( 0.03,  0.03, 0) + eigen_t);
        frame.landmarks_pointXYZ.push_back(eigen_r * Eigen::Vector3d( 0.03, -0.03, 0) + eigen_t);
        frame.landmarks_pointXYZ.push_back(eigen_r * Eigen::Vector3d(-0.03, -0.03, 0) + eigen_t);
    }
    ArucoFrameGlobal.push_back(frame);

    if(ArucoMapGlobal.landmark_id.empty())
    {
        // First frame: the map frame coincides with the camera frame.
        // One pose vertex for the whole frame, anchored at the origin.
        VertexSE3* v = new VertexSE3;
        v->setEstimate(frame.robot_pose);
        v->setId(id_vertex_pose++);
        optimizer.addVertex(v);
        for(int i = 0; i < (int)frame.landmark_id.size(); i++)
        {
            ArucoMapGlobal.landmark_id.push_back(frame.landmark_id[i]);
            for(int j = 0; j < 4; j++)
            {
                ArucoMapGlobal.landmarks_pointXYZ.push_back(frame.landmarks_pointXYZ[i*4+j]);
                VertexPointXYZ* l = new VertexPointXYZ;
                l->setEstimate(frame.landmarks_pointXYZ[i*4+j]);
                l->setFixed(true);  // first-frame corners anchor the whole graph
                // Vertex id is derived from the tag id: tag ids are unique, so
                // landmark vertex ids can never collide.
                l->setId(id_vertex_pointXYZ_start + frame.landmark_id[i]*4 + j);
                optimizer.addVertex(l);
                EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
                e->setVertex(0, optimizer.vertices().find(id_vertex_pose - 1)->second);
                e->setVertex(1, optimizer.vertices().find(id_vertex_pointXYZ_start + frame.landmark_id[i]*4 + j)->second);
                e->setMeasurement(frame.landmarks_pointXYZ[i*4+j]);  // was [i*4+0]: wrong corner
                e->setParameterId(0, 0);  // bind to the SE3 offset parameter (required by g2o)
                optimizer.addEdge(e);
            }
        }
        return;
    }

    // Split the frame's tags into those already present in the map (id_same)
    // and previously unseen ones (id_different). Both store indices into
    // frame.landmark_id.
    vector<int> id_same, id_different;
    for(int i = 0; i < (int)frame.landmark_id.size(); i++)
    {
        int j;
        for(j = 0; j < (int)ArucoMapGlobal.landmark_id.size(); j++)
        {
            if(ArucoMapGlobal.landmark_id[j] == frame.landmark_id[i])
            {
                id_same.push_back(i);
                break;
            }
        }
        if(j == (int)ArucoMapGlobal.landmark_id.size())
            id_different.push_back(i);
    }

    // Localization requires at least one tag shared with the map; without it
    // new tags cannot be anchored either, so there is nothing to do.
    if(id_same.empty())
        return;

    // Build corner correspondences (map frame <-> camera frame) for the
    // shared tags, then align them to recover the camera pose in the map.
    vector<Eigen::Vector3d> landmarkXYZ_common_map;
    vector<Eigen::Vector3d> landmarkXYZ_common_frame;
    for(int i = 0; i < (int)ArucoMapGlobal.landmark_id.size(); i++)
    {
        for(int j = 0; j < (int)id_same.size(); j++)
        {
            if(ArucoMapGlobal.landmark_id[i] == frame.landmark_id[id_same[j]])
            {
                for(int k = 0; k < 4; k++)
                {
                    landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*4+k]);
                    landmarkXYZ_common_frame.push_back(frame.landmarks_pointXYZ[id_same[j]*4+k]);
                }
                break;
            }
        }
    }
    Eigen::Isometry3d T;
    bundleAdjustment(landmarkXYZ_common_map, landmarkXYZ_common_frame, T);  // T = current camera pose in the map frame
    frame.robot_pose = T;
    ArucoFrameGlobal.back().robot_pose = T;

    // Add this frame's pose vertex and one observation edge per shared corner.
    VertexSE3* v = new VertexSE3;
    v->setEstimate(frame.robot_pose);
    v->setId(id_vertex_pose++);
    optimizer.addVertex(v);
    for(int i = 0; i < (int)id_same.size(); i++)
    {
        for(int j = 0; j < 4; j++)
        {
            EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
            e->setVertex(0, optimizer.vertices().find(id_vertex_pose - 1)->second);
            e->setVertex(1, optimizer.vertices().find(id_vertex_pointXYZ_start + frame.landmark_id[id_same[i]]*4 + j)->second);
            e->setMeasurement(frame.landmarks_pointXYZ[id_same[i]*4+j]);
            e->setParameterId(0, 0);  // bind to the SE3 offset parameter (required by g2o)
            optimizer.addEdge(e);
        }
    }

    // New tags: transform their corners into the map frame, append them to the
    // map, and add landmark vertices (free, not fixed) plus observation edges.
    for(int i = 0; i < (int)id_different.size(); i++)
    {
        ArucoMapGlobal.landmark_id.push_back(frame.landmark_id[id_different[i]]);
        for(int j = 0; j < 4; j++)
        {
            Eigen::Vector3d p_map = T * frame.landmarks_pointXYZ[id_different[i]*4+j];
            ArucoMapGlobal.landmarks_pointXYZ.push_back(p_map);
            VertexPointXYZ* l = new VertexPointXYZ;
            l->setEstimate(p_map);
            l->setId(id_vertex_pointXYZ_start + frame.landmark_id[id_different[i]]*4 + j);
            optimizer.addVertex(l);
            EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
            e->setVertex(0, optimizer.vertices().find(id_vertex_pose - 1)->second);
            e->setVertex(1, optimizer.vertices().find(id_vertex_pointXYZ_start + frame.landmark_id[id_different[i]]*4 + j)->second);
            e->setMeasurement(frame.landmarks_pointXYZ[id_different[i]*4+j]);  // measurement stays in the camera frame
            e->setParameterId(0, 0);  // bind to the SE3 offset parameter (required by g2o)
            optimizer.addEdge(e);
        }
    }
}
namespace {
// Help text and option specification for cv::CommandLineParser.
// Format per option: "{name | default value | help text}".
const char* about = "Basic marker detection";
const char* keys =
"{d | 0 | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{v | | Input from video file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{c | log270 | Camera intrinsic parameters. Needed for camera pose }"
"{l | 0.06 | Marker side lenght (in meters). Needed for correct scale in camera pose }"
"{dp | detector_params.yml | File of marker detector parameters }"
"{r | | show rejected candidates too }";
}
// Loads "camera_matrix" and "distortion_coefficients" from an OpenCV
// calibration file (YAML/XML). Returns false if the file cannot be opened;
// the output Mats are untouched in that case.
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
    FileStorage storage(filename, FileStorage::READ);
    const bool opened = storage.isOpened();
    if(opened) {
        storage["camera_matrix"] >> camMatrix;
        storage["distortion_coefficients"] >> distCoeffs;
    }
    return opened;
}
static bool readDetectorParameters(string filename, Ptr<:detectorparameters> ¶ms) {
FileStorage fs(filename, FileStorage::READ);
if(!fs.isOpened())
return false;
fs["adaptiveThreshWinSizeMin"] >> params->adaptiveThreshWinSizeMin;
fs["adaptiveThreshWinSizeMax"] >> params->adaptiveThreshWinSizeMax;
fs["adaptiveThreshWinSizeStep"] >> params->adaptiveThreshWinSizeStep;
fs["adaptiveThreshConstant"] >> params->adaptiveThreshConstant;
fs["minMarkerPerimeterRate"] >> params->minMarkerPerimeterRate;
fs["maxMarkerPerimeterRate"] >> params->maxMarkerPerimeterRate;
fs["polygonalApproxAccuracyRate"] >> params->polygonalApproxAccuracyRate;
fs["minCornerDistanceRate"] >> params->minCornerDistanceRate;
fs["minDistanceToBorder"] >> params->minDistanceToBorder;
fs["minMarkerDistanceRate"] >> params->minMarkerDistanceRate;
fs["doCornerRefinement"] >> params->doCornerRefinement;
fs["cornerRefinementWinSize"] >> params->cornerRefinementWinSize;
fs["cornerRefinementMaxIterations"] >> params->cornerRefinementMaxIterations;
fs["cornerRefinementMinAccuracy"] >> params->cornerRefinementMinAccuracy;
fs["markerBorderBits"] >> params->markerBorderBits;
fs["perspectiveRemovePixelPerCell"] >> params->perspectiveRemovePixelPerCell;
fs["perspectiveRemoveIgnoredMarginPerCell"] >> params->perspectiveRemoveIgnoredMarginPerCell;
fs["maxErroneousBitsInBorderRate"] >> params->maxErroneousBitsInBorderRate;
fs["minOtsuStdDev"] >> params->minOtsuStdDev;
fs["errorCorrectionRate"] >> params->errorCorrectionRate;
return true;
}
int main(int argc, char *argv[]) {
CommandLineParser parser(argc, argv, keys);
parser.about(about);
int dictionaryId = parser.get("d");
bool showRejected = parser.has("r");
bool estimatePose = parser.has("c");
float markerLength = parser.get("l");
Ptr<:detectorparameters> detectorParams = aruco::DetectorParameters::create();
if(parser.has("dp")) {
bool readOk = readDetectorParameters(parser.get("dp"), detectorParams);
if(!readOk) {
cerr << "Invalid detector parameters file" << endl;
return 0;
}
}
int camId = parser.get("ci");
String video;
if(parser.has("v")) {
video = parser.get("v");
}
if(!parser.check()) {
parser.printErrors();
return 0;
}
Ptr<:dictionary> dictionary =
aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));
Mat camMatrix, distCoeffs;
if(estimatePose) {
bool readOk = readCameraParameters(parser.get("c"), camMatrix, distCoeffs);
if(!readOk) {
cerr << "Invalid camera file" << endl;
return 0;
}
}
VideoCapture inputVideo;
/* int waitTime; if(!video.empty()) { inputVideo.open(video); waitTime = 0; } else { inputVideo.open(camId); waitTime = 10; } inputVideo.set(CAP_PROP_FRAME_WIDTH, 640); inputVideo.set(CAP_PROP_FRAME_HEIGHT , 480); */
double totalTime = 0;
int totalIterations = 0;
g2o::SparseOptimizer optimizer;//全局优化器
//以下三句话要加
g2o::ParameterSE3Offset* cameraOffset = new g2o::ParameterSE3Offset;
cameraOffset->setId(0);
optimizer.addParameter(cameraOffset);
optimizer.setVerbose(true);//调试信息输出
g2o::BlockSolverX::LinearSolverType * linearSolver;//6代表pose的自由度 3代表landmark的自由度
linearSolver= new g2o::LinearSolverDense<:blocksolverx::posematrixtype>();
g2o::BlockSolverX * solver_ptr
= new g2o::BlockSolverX(linearSolver);
//优化方法LM
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
optimizer.setAlgorithm(solver);
Mat inputImage=imread("./squre_80_120/0.jpg");
int num_image=1;
while(true) {
Mat image, imageCopy;
//inputVideo.retrieve(image);
image = inputImage;
double tick = (double)getTickCount();
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
vector< Vec3d > rvecs, tvecs;
// detect markers and estimate pose
aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
if(estimatePose && ids.size() > 0)
aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
tvecs);
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;
// draw results
image.copyTo(imageCopy);
if(ids.size() > 0)
{
aruco::drawDetectedMarkers(imageCopy, corners, ids);
if(estimatePose) {
for(unsigned int i = 0; i < ids.size(); i++)
aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
markerLength * 0.5f);
}
}
if(showRejected && rejected.size() > 0)
aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));
imshow("out", imageCopy);
int key = waitKey(0);
cout << key << endl;
if(key == 1048689) break;//'q'
if(key == 1048675)//'c'
buildMap(ids, corners, rvecs, tvecs, optimizer);
string string_temp = "./squre2/" + to_string(num_image++) +".jpg";
if(num_image == 14)
break;
inputImage=imread(string_temp);
}
//之后开始g2o优化建图等。
optimizer.save("before.g2o");
optimizer.initializeOptimization();
optimizer.optimize(10);
optimizer.save("after.g2o");
return 0;
}