编译环境:Ubuntu 20.04,VS Code。
首先是 CMakeLists.txt 文件:
# Build configuration for the chapter examples: basic image I/O, undistortion,
# stereo vision, and RGB-D point-cloud joining.
# NOTE: VERSION 2.8 is deprecated and rejected by CMake >= 3.27; 3.10 still
# covers Ubuntu 20.04's default CMake.
cmake_minimum_required(VERSION 3.10)
project(image)

set(CMAKE_BUILD_TYPE "Release")
# Use the portable standard knobs instead of overwriting CMAKE_CXX_FLAGS
# with a hand-written -std flag (which would clobber user-supplied flags).
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(OpenCV REQUIRED)
find_package(Pangolin REQUIRED)
find_package(Sophus REQUIRED)

# Eigen is header-only; Sophus and the sources include it directly.
include_directories("/usr/include/eigen3")
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${Pangolin_INCLUDE_DIRS})

add_executable(img image.cpp)
add_executable(distort undistort.cpp)
add_executable(stereo stereoVision.cpp)
add_executable(joint jointMap.cpp)

target_link_libraries(img ${OpenCV_LIBS})
target_link_libraries(distort ${OpenCV_LIBS})
target_link_libraries(stereo ${OpenCV_LIBS} ${Pangolin_LIBRARIES})
target_link_libraries(joint ${OpenCV_LIBS} ${Pangolin_LIBRARIES} Sophus::Sophus)
然后是双目视觉代码(stereoVision.cpp):
#include <opencv2/opencv.hpp>
#include <Eigen/Core>
#include <iostream>
#include <pangolin/pangolin.h>
#include <unistd.h>
using namespace std;
using namespace Eigen;
// Absolute paths to the rectified stereo pair used by main().
string left_file = "/home/martin/桌面/code/image/left.png";
string right_file = "/home/martin/桌面/code/image/right.png";
// Renders a cloud of points (x, y, z, grayscale intensity in [0,1]) with Pangolin.
void showPointCloud(const vector<Vector4d, Eigen::aligned_allocator<Vector4d>> &pointcloud);
int main(int argc,char** argv)
{
double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157; //相机内参
double b = 0.573; //基线宽度
cv::Mat left = cv::imread(left_file , 0); //读图
cv::Mat right = cv::imread(right_file, 0);
cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
0,96,9,8*9*9,32*9*9,1,63,10,100,2);
/*双目立体匹配算法
参数含义解释(按顺序):
int minDisparity 最小差异值,通常是0,在个别情况需要进行改动,比如整流算法将图像改变
int numDisparities 最大差异减去最小差异。首先这个数字得大于零,其次还得能被16整除
int blockSize 匹配的块大小。必须是大于等于1的奇数,通常取值在3~11内
int P1 控制视差平滑度的第一个参数
int P2 控制视差平滑度的第二个参数。这两个值越大,差异越平滑。P1是相邻像素之间的视差变化正负1的惩罚
P2是相邻像素之间视差超过1的惩罚。算法要求P2>P1。
int disp12MaxDiff 左右视差检查中允许的最大差异。设为非正值即禁用该功能
int preFilterCap 预滤波图像像素的截断值。
该算法先计算每个像素的导数,用[-preFilterCap,perFilterCap]间断剪切,
最后将结果传递给Birchfield-Tomas像素成本函数。(取值1~63)
int uniquenessRatio 视差唯一性百分比,视差窗口范围内最低代价是次低代价的(1+uniquenessRatio/100)倍时,
最低代价对应的视差值才是该像素点的视差,否则视差置0(一般5~15)
int speckleWindowSize 平滑视差区域的最大尺寸,0的话是禁用斑点过滤,一般50~200
int speckleRange 每个连接组件内的最大视差变化。(会被隐式乘以16)一般1或2 ||所以我不太理解高博的32是什么意思
int mode 设置为StereoSGBM::MODE_HH以运行全尺寸双通道动态编程算法。默认false(这里直接没写)
*/
cv::Mat disparity_sgbm,disparity;
sgbm->compute(left,right,disparity_sgbm); //计算、转换图像类型和画幅
disparity_sgbm.convertTo(disparity, CV_32F, 1.0/16.0f);
vector<Vector4d, Eigen::aligned_allocator<Vector4d>> pointcloud;
for (size_t i = 0; i < left.rows; i++) //生成深度点云
{
for (size_t j = 0; j < left.cols; j++)
{
if(disparity.at<float>(i,j) <= 0.0 || disparity.at<float>(i,j) >= 96.0)//在宽高范围内获取像素值
continue;
Vector4d point (0,0,0, left.at<uchar>(i,j) / 255.0); //三维坐标带个颜色
double x = (j-cx)/fx; //书上的双目模型
double y = (i-cy)/fy;
double depth = fx * b /(disparity.at<float>(i,j));
point[0] = x*depth;
point[1] = y*depth;
point[2] = depth;
pointcloud.push_back(point);
}
}
cv::imshow("disparity",disparity / 96.0);
cv::waitKey(0);
showPointCloud(pointcloud);
return 0;
}
//pangolin正常调用生成点云图像----------------------------------------------------------------------
// Opens a Pangolin window and draws the cloud as gray points (p[3] = intensity)
// until the user closes it. Blocks the calling thread.
void showPointCloud(const vector<Vector4d, Eigen::aligned_allocator<Vector4d>> &pointcloud)
{
    // Nothing to draw: report and bail out instead of opening an empty viewer.
    if (pointcloud.empty())
    {
        cerr << "Point cloud is empty!" << endl;
        return;
    }

    // OpenGL window with depth testing and alpha blending enabled.
    pangolin::CreateWindowAndBind("Point Cloud Viewer", 1024, 768);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    // Virtual camera: pinhole projection plus a look-at pose slightly above and
    // behind the origin, with the y axis pointing down (vision convention).
    pangolin::OpenGlRenderState s_cam(
        pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
        pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0));

    // Interactive 3D view panel driven by mouse via Handler3D.
    pangolin::View &d_cam = pangolin::CreateDisplay()
        .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
        .SetHandler(new pangolin::Handler3D(s_cam));

    // Render loop: one frame per iteration until the window is closed.
    while (!pangolin::ShouldQuit())
    {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        d_cam.Activate(s_cam);
        glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
        glPointSize(2);
        glBegin(GL_POINTS);
        for (const auto &pt : pointcloud)
        {
            glColor3f(pt[3], pt[3], pt[3]);   // grayscale intensity
            glVertex3d(pt[0], pt[1], pt[2]);
        }
        glEnd();
        pangolin::FinishFrame();
        usleep(5000); // throttle the loop (~200 fps cap)
    }
}
最后是点云地图代码(jointMap.cpp):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <boost/format.hpp>
#include <pangolin/pangolin.h>
#include "sophus/se3.hpp"
using namespace std;
using namespace Eigen;
// Same aliases as in the previous two examples.
typedef Eigen::Matrix<double,6,1> Vector6d;
typedef vector<Sophus::SE3d,Eigen::aligned_allocator<Sophus::SE3d>> TrajectoryType;
// Renders a colored cloud of points laid out as (x, y, z, r, g, b) with Pangolin.
void showPointCloud(const vector<Vector6d, Eigen::aligned_allocator<Vector6d>> &pointcloud);
int main (int argc,char** argv)
{
vector<cv::Mat> colorImgs, depthImgs; //创建两个放置彩色图和深度图的容器
TrajectoryType poses; //相机姿态
ifstream fin("/home/martin/桌面/code/image/pose.txt"); //我改成了pose的绝对路径
if(!fin)
{
cerr<<"路径错误"<<endl;
return 1;
}
for (int i = 0; i < 5; i++) //书中的fmt读取文件的方式我改成了绝对路径还是不行
{
string img_file = "color.png"; //我选择用字符串插值(本人c++还是太菜了)
string dpt_file = "depth.pgm";
string q = to_string(i+1);
img_file.insert(5,q);
dpt_file.insert(5,q);
cout<<img_file<<endl;
colorImgs.push_back(cv::imread(img_file)); //逐个读取并尾插进容器
depthImgs.push_back(cv::imread(dpt_file,-1));
double data[7] = {0}; //姿态读取3456旋转012平移
for(auto &d:data) fin>>d; //c++11新的for遍历,还挺好用的
Sophus::SE3d pose (Eigen::Quaterniond(data[6],data[3],data[4],data[5]),
Eigen::Vector3d(data[0],data[1],data[2]));
poses.push_back(pose);
}
double cx = 325.5, cy = 253.5, fx = 518.0, fy = 519.0; //相机内参
double depthScale = 1000.0;
vector<Vector6d, Eigen::aligned_allocator<Vector6d>> pointcloud;
pointcloud.reserve(10000000);
for (size_t i = 0; i < 5; i++)
{
cout<<"图象转换中: "<<i+1<<endl;
cv::Mat color = colorImgs[i]; //取出图像容器中的图像
cv::Mat depth = depthImgs[i];
Sophus::SE3d T = poses[i];
for (size_t j = 0; j < color.rows; j++) //遍历图像
{
for (size_t k = 0; k < color.cols; k++)
{
unsigned int d = depth.ptr<unsigned short>(j)[k];//读取深度
if(d==0) continue; //深度为零就是没有测到
Eigen::Vector3d point;
point[2] = double(d) / depthScale;
point[0] = (k-cx)*point[2]/fx;
point[1] = (j-cy)*point[2]/fy;
Eigen::Vector3d pointworld = T * point;
Vector6d p;
p.head<3>() = pointworld;
p[5] = color.data[j * color.step + k * color.channels()]; //blue
p[4] = color.data[j * color.step + k * color.channels()+1]; //green
p[3] = color.data[j * color.step + k * color.channels()+2]; //red
pointcloud.push_back(p); //拼接
}
}
}
cout<<"点云共有"<<pointcloud.size()<<"个点"<<endl;
showPointCloud(pointcloud);
return 0;
}
//和先前的非常像,改动只有两处-------------------------------------------------------------------------
// Opens a Pangolin window and draws the cloud as colored points
// (p[3..5] = r,g,b in 0..255) until the user closes it. Blocks the caller.
void showPointCloud(const vector<Vector6d, Eigen::aligned_allocator<Vector6d>> &pointcloud)
{
    // Nothing to draw: report and bail out instead of opening an empty viewer.
    if (pointcloud.empty())
    {
        cerr << "Point cloud is empty!" << endl;
        return;
    }

    // OpenGL window with depth testing and alpha blending enabled.
    pangolin::CreateWindowAndBind("Point Cloud Viewer", 1024, 768);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    // Virtual camera: pinhole projection plus a look-at pose slightly above and
    // behind the origin, with the y axis pointing down (vision convention).
    pangolin::OpenGlRenderState s_cam(
        pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
        pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0));

    // Interactive 3D view panel driven by mouse via Handler3D.
    pangolin::View &d_cam = pangolin::CreateDisplay()
        .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
        .SetHandler(new pangolin::Handler3D(s_cam));

    // Render loop: one frame per iteration until the window is closed.
    while (!pangolin::ShouldQuit())
    {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        d_cam.Activate(s_cam);
        glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
        glPointSize(2);
        glBegin(GL_POINTS);
        for (const auto &pt : pointcloud)
        {
            // Normalize the stored 0..255 channels to OpenGL's 0..1 range.
            glColor3f(pt[3] / 255.0, pt[4] / 255.0, pt[5] / 255.0);
            glVertex3d(pt[0], pt[1], pt[2]);
        }
        glEnd();
        pangolin::FinishFrame();
        usleep(5000); // throttle the loop (~200 fps cap)
    }
}
c++东西确实多,还得继续学。
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)