一,通过ros订阅realsense图像
// Entry point: subscribe to the RealSense color stream and the depth stream
// aligned to it, and route synchronized pairs into one callback.
int main(int argc, char **argv)
{
// Register this process with the ROS master as node "image_listener".
ros::init(argc, argv, "image_listener");
ros::NodeHandle nh;
// Create the display window up front; startWindowThread lets HighGUI
// service window events on its own thread.
cv::namedWindow("view");
cv::startWindowThread();
// Queue size 1: always work on the freshest frame, dropping stale ones.
message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, "/camera/color/image_raw", 1);
message_filters::Subscriber<sensor_msgs::Image> info_sub(nh, "/camera/aligned_depth_to_color/image_raw", 1);
// TimeSynchronizer uses the ExactTime policy: the callback fires only when
// both messages carry identical timestamps. NOTE(review): if the driver does
// not stamp color and aligned depth identically, ApproximateTime would be
// needed instead — confirm against the camera driver.
message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image> sync(image_sub, info_sub, 10);
sync.registerCallback(boost::bind(&callback, _1, _2));
ros::spin();
cv::destroyWindow("view");
}
这里使用了 message_filters,将彩色图像和深度图像按时间戳同步到同一个回调函数;也就是说,在回调函数里可以同时取得彩色图与深度图的数据。
二,将图像转换为opencv的Mat并获取指针
// Convert the incoming ROS image messages into OpenCV Mats (deep copies,
// so the Mats stay valid after the message pointers are released).
cv_bridge::CvImagePtr color_ptr, depth_ptr;
cv::Mat color_pic, depth_pic;
// Color frame as 8-bit BGR — OpenCV's native channel order.
color_ptr = cv_bridge::toCvCopy(color, sensor_msgs::image_encodings::BGR8);
color_pic = color_ptr->image;
// Depth frame converted to single-channel 32-bit float.
// NOTE(review): the units stay whatever the driver publishes
// (RealSense aligned depth: presumably millimeters) — confirm.
depth_ptr = cv_bridge::toCvCopy(depth, sensor_msgs::image_encodings::TYPE_32FC1);
depth_pic = depth_ptr->image;
三,保存图像
转换后的深度图是 32FC1 格式,直接使用 imwrite 保存为 PNG 会被转换成 8U 格式而丢失精度。下面介绍两种可以完整保存 32FC1 图像的方法。
1,将图像保存为TIF格式
// Timestamped output path; TIFF can store a 32FC1 image losslessly,
// so imwrite keeps the float depth values intact.
std::string droadtif="/home/project/myimage/all/depth+"+std::string(tmp)+".tif";
cv::imwrite(droadtif,depth_pic);
在读取的时候使用下面代码读取:
cv::Mat depthtif = imread("/home/project/myimage/depth1.tif",IMREAD_UNCHANGED);//IMREAD_UNCHANGED is the key point: it keeps the 32F data instead of decoding to 8-bit
cout<<"tif center point"<<depthtif.at<float>(240,320)<<endl;//print the center-pixel depth (assumes a 640x480 frame — TODO confirm)
2,将图像保存为8UC4的PNG格式
// Reinterpret the 32FC1 buffer as 8UC4: each 4-byte float becomes one 4-byte
// pixel, so a lossless PNG round trip preserves the exact float bits.
// dep8u does NOT own the memory — it aliases depth_pic.data.
Mat dep8u(depth_pic.rows,depth_pic.cols,CV_8UC4,depth_pic.data);
std::vector<int> compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);// maximum PNG compression (still lossless)
// Sanity check: view the very same bytes as float again and print the center value.
Mat depth832(depth_pic.rows,depth_pic.cols,CV_32FC1,dep8u.data);
cout<<"depth832 center point"<<depth832.at<float>(240,320)<<endl;
std::string droad="/home/project/myimage/all/depth+"+std::string(tmp)+".png";
cv::imwrite(droad,dep8u,compression_params);
在读取的时候使用下面代码读取
// Load the PNG unchanged (stays 8UC4), then alias its bytes back into a 32FC1 view.
cv::Mat depth1 = imread("/home/project/myimage/depth1.png",IMREAD_UNCHANGED);
Mat depth32(depth_pic.rows,depth_pic.cols,CV_32FC1,depth1.data);//reinterpret the 8UC4 bytes as 32FC1
四,完整代码
代码实时接收realsense图像,并实时显示点云+深度图+彩色图
#include <time.h>
#include <iostream>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
// PCL 库
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/visualization/cloud_viewer.h>
using namespace cv;
typedef pcl::PointCloud<pcl::PointXYZRGB> PointCloud;
// Global cloud viewer window; updated from the image callback via showCloud().
pcl::visualization::CloudViewer viewer("Cloud Viewer: Rabbit"); //create the viewer object
// Camera intrinsics of the RealSense color stream (pinhole model).
// camera_factor scales raw depth down to meters — assumes the depth values
// are in millimeters; TODO confirm against the driver.
const double camera_factor = 1000;
const double camera_cx = 312.7254638671875;
const double camera_cy = 231.78810119628906;
const double camera_fx = 602.482177734375;
const double camera_fy = 601.5348510742188;
// Synchronized callback for one color frame and its aligned depth frame.
// Converts both to cv::Mat, builds a colored point cloud, displays the
// images and the cloud, and saves frames to disk under keyboard control:
// 'h' starts continuous saving, 'f' stops it, 'a' saves a single pair.
void callback(const sensor_msgs::ImageConstPtr& color, const sensor_msgs::ImageConstPtr& depth)
{
//ROS_INFO("RECEIVE Image...");
// try
// {
// cv::imshow("view", cv_bridge::toCvShare(color, "bgr8")->image);
// }
// catch (cv_bridge::Exception& e)
// {
// ROS_ERROR("Could not convert from '%s' to 'bgr8'.", color->encoding.c_str());
// }
// try
// {
// //cv::imshow("depthview", cv_bridge::toCvShare(depth, "16UC1")->image);
// cv::imshow("depthview", cv_bridge::toCvShare(depth, sensor_msgs::image_encodings::TYPE_32FC1)->image);
// }
// catch (cv_bridge::Exception& e)
// {
// ROS_ERROR("Could not convert from '%s' to '16UC1'.", depth->encoding.c_str());
// }
//ointCloud
// Deep-copy both messages into OpenCV Mats: color as 8-bit BGR, depth as 32-bit float.
cv_bridge::CvImagePtr color_ptr, depth_ptr;
cv::Mat color_pic, depth_pic;
color_ptr = cv_bridge::toCvCopy(color, sensor_msgs::image_encodings::BGR8);
color_pic = color_ptr->image;
depth_ptr = cv_bridge::toCvCopy(depth, sensor_msgs::image_encodings::TYPE_32FC1);
depth_pic = depth_ptr->image;
//cout<<"channels"<<depth_pic.depth()<<endl;
PointCloud::Ptr cloud ( new PointCloud );
for (int m = 0; m < depth_pic.rows; m++){
for (int n = 0; n < depth_pic.cols; n++){
// Read the depth value at pixel (m,n).
if(depth_pic.ptr<float>(m)[n]>4096)
depth_pic.ptr<float>(m)[n]=0;//depth filter: zero out readings beyond 4096 raw units (presumably mm — TODO confirm)
float d = depth_pic.ptr<float>(m)[n];//ushort d = depth_pic.ptr<ushort>(m)[n];
// d may be missing (zero); if so, skip this pixel.
if (d == 0)
continue;
// d is valid, so add one point to the cloud.
pcl::PointXYZRGB p;
// Back-project the pixel into camera space via the pinhole model.
p.z = double(d) / camera_factor;
// if(m==(int)(depth_pic.rows/2)&&n==(int)(depth_pic.cols/2))
// cout<<double(d)<<endl;
p.x = (n - camera_cx) * p.z / camera_fx;
p.y = (m - camera_cy) * p.z / camera_fy;
// Take the point's color from the RGB image.
// The image is a 3-channel BGR Mat, hence the channel order below.
p.b = color_pic.ptr<uchar>(m)[n*3];
p.g = color_pic.ptr<uchar>(m)[n*3+1];
p.r = color_pic.ptr<uchar>(m)[n*3+2];
// Append p to the cloud.
cloud->points.push_back( p );
}
}
cv::imshow("view", color_pic);
cv::imshow("depthview", depth_pic);
viewer.showCloud(cloud);
char c = (char)cv::waitKey(50);//poll the pressed key (also lets HighGUI process window events)
// Persists across callbacks: true while continuous saving is enabled.
static bool startsave=false;
if (c == 'h')
{
startsave = true;
ROS_INFO("Start write Image...");
}
if(c=='f')
{
startsave= false;
ROS_INFO("Stop write Image...");
}
// Save this frame pair when continuous saving is on, or once on 'a'.
if(startsave||c == 'a')
{
// Build a "YYYYMMDDhhmmss" timestamp used in the output file names.
time_t t = time(NULL);
struct tm* stime=localtime(&t);
char tmp[32]{0};
snprintf(tmp,sizeof(tmp),"%04d%02d%02d%02d%02d%02d",1900+stime->tm_year,1+stime->tm_mon,stime->tm_mday, stime->tm_hour,stime->tm_min,stime->tm_sec);
cout<<tmp<<endl;
// cout<<depth_pic.channels()<<endl;
// cout<<depth_pic.depth()<<endl;
cout<<"d center point"<<depth_pic.at<float>(240,320)<<endl;// hard-coded center index assumes at least 640x480 — TODO confirm
std::string croad="/home/project/myimage/all/color+"+std::string(tmp)+".png";
std::string droad="/home/project/myimage/all/depth+"+std::string(tmp)+".png";
// NOTE(review): this path starts with /home/cq/... while croad/droad use /home/... — confirm which directory is intended.
std::string droadtif="/home/cq/project/myimage/all/depth+"+std::string(tmp)+".tif";
// std::string croad="/home/project/myimage/color1.png";
// std::string droad="/home/project/myimage/depth1.png";
// std::string droadtif="/home/project/myimage/depth1.tif";
cv::imwrite(croad,color_pic);//
//********************************
// Alias the 32FC1 depth buffer as 8UC4 (4 bytes per float) so a lossless
// PNG preserves the exact float bits. dep8u does NOT own the memory.
Mat dep8u(depth_pic.rows,depth_pic.cols,CV_8UC4,depth_pic.data);
std::vector<int> compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
// Sanity check: reinterpret the same bytes as float again and print the center value.
Mat depth832(depth_pic.rows,depth_pic.cols,CV_32FC1,dep8u.data);
cout<<"depth832 center point"<<depth832.at<float>(240,320)<<endl;
//***save depth
cv::imwrite(droad,dep8u,compression_params);
cv::imwrite(droadtif,depth_pic);// TIFF stores the 32FC1 Mat directly
//evluate ***********
// cv::Mat depth1 = imread("/home/cq/project/myimage/depth1.png",IMREAD_UNCHANGED);
// Mat depth32(depth_pic.rows,depth_pic.cols,CV_32FC1,depth1.data);
//
// cout<<depth32.channels()<<endl;
// cout<<depth32.depth()<<endl;
// cout<<"d center point"<<depth32.at<float>(240,320)<<endl;
//
// cv::Mat depthtif = imread("/home/cq/project/myimage/depth1.tif",IMREAD_UNCHANGED);
// cout<<"tif center point"<<depthtif.at<float>(240,320)<<endl;
ROS_INFO("write Image...");
}
// Redundant (both Mats go out of scope here) but harmless.
color_pic.release();
depth_pic.release();
}
int main(int argc, char **argv)
{
ros::init(argc, argv, "image_listener");
ros::NodeHandle nh;
cv::namedWindow("view");
cv::startWindowThread();
message_filters::Subscriber<sensor_msgs::Image> image_sub(nh, "/camera/color/image_raw", 1);
message_filters::Subscriber<sensor_msgs::Image> info_sub(nh, "/camera/aligned_depth_to_color/image_raw", 1);
message_filters::TimeSynchronizer<sensor_msgs::Image, sensor_msgs::Image> sync(image_sub, info_sub, 10);
sync.registerCallback(boost::bind(&callback, _1, _2));
ros::spin();
cv::destroyWindow("view");
}
catkin_create_pkg visionrobot sensor_msgs cv_bridge roscpp std_msgs image_transport
cmake_minimum_required(VERSION 3.0.2)
project(visionrobot)

# Catkin components mirror the depends declared in package.xml.
find_package(catkin REQUIRED COMPONENTS
cv_bridge
image_transport
roscpp
sensor_msgs
std_msgs
)
find_package(OpenCV REQUIRED)
find_package(PCL 1.10 REQUIRED)

catkin_package(
# INCLUDE_DIRS include
# LIBRARIES visionrobot
# CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs std_msgs
# DEPENDS system_lib
)

# One consolidated include list; the original added ${catkin_INCLUDE_DIRS}
# three times through duplicated include_directories() calls.
include_directories(
include
${catkin_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
${PCL_INCLUDE_DIRS}
)

add_executable(imageSubscriber src/imageSubscriber.cpp)
target_link_libraries(imageSubscriber ${catkin_LIBRARIES} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} )
<?xml version="1.0"?>
<package format="2">
  <name>visionrobot</name>
  <version>0.0.0</version>
  <description>The visionrobot package</description>
  <!-- One maintainer tag required, multiple allowed, one person per tag -->
  <!-- Example: -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="xx@todo.todo">cq</maintainer>
  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <!-- NOTE(review): "TODO" is a template placeholder, not a valid license
       string; set a real license (e.g. BSD) before releasing the package. -->
  <license>TODO</license>
  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/visionrobot</url> -->
  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!-- <depend>roscpp</depend> -->
  <!-- Note that this is equivalent to the following: -->
  <!-- <build_depend>roscpp</build_depend> -->
  <!-- <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!-- <build_depend>message_generation</build_depend> -->
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!-- <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!-- <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <!-- <exec_depend>message_runtime</exec_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!-- <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!-- <doc_depend>doxygen</doc_depend> -->
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>cv_bridge</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>std_msgs</build_depend>
  <!-- The source includes <pcl_conversions/pcl_conversions.h>, so the
       pcl_conversions package must be declared here as well. -->
  <build_depend>pcl_conversions</build_depend>
  <build_export_depend>cv_bridge</build_export_depend>
  <build_export_depend>image_transport</build_export_depend>
  <build_export_depend>roscpp</build_export_depend>
  <build_export_depend>sensor_msgs</build_export_depend>
  <build_export_depend>std_msgs</build_export_depend>
  <build_export_depend>pcl_conversions</build_export_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>image_transport</exec_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <exec_depend>pcl_conversions</exec_depend>
  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)