// 根据重叠点云生成匹配图像之间的对应点对
// Generate corresponding point pairs between matched images from overlapping point clouds.
/*==================== Standard library ====================*/
#include <iostream>
#include <fstream>
#include <sstream>   // std::stringstream (previously only available transitively)
#include <iomanip>   // std::fixed / std::setprecision
#include <string>
#include <vector>
#include <cmath>
#include <algorithm> // std::sort / std::find / std::binary_search
#include <omp.h>
/*==================== Boost ====================*/
#include <boost/make_shared.hpp>
/*==================== Eigen ====================*/
#include <Eigen/StdVector>
#include <Eigen/Geometry>
/*==================== OpenCV ====================*/
#include <opencv2/opencv.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/types_c.h"
/*==================== PCL: io / core types ====================*/
#include <pcl/io/io.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/openni2_grabber.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/point_representation.h>
#include <pcl/ModelCoefficients.h>
#include <pcl/kdtree/kdtree_flann.h>
/*==================== PCL: common ====================*/
#include <pcl/common/common.h>        // getMinMax3D()
#include <pcl/common/angles.h>
#include <pcl/common/centroid.h>
#include <pcl/common/distances.h>
#include <pcl/common/file_io.h>
#include <pcl/common/random.h>
#include <pcl/common/geometry.h>
#include <pcl/common/intersections.h>
#include <pcl/common/norms.h>
#include <pcl/common/time.h>
/*==================== PCL: surface ====================*/
#include <pcl/surface/mls.h>
#include <pcl/surface/concave_hull.h>
#include <pcl/surface/gp3.h>
#include <pcl/surface/organized_fast_mesh.h>
/*==================== PCL: features ====================*/
#include <pcl/features/normal_3d.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/pfh.h>
#include <pcl/features/fpfh.h>
#include <pcl/features/fpfh_omp.h>
#include <pcl/features/vfh.h>
#include <pcl/features/narf.h>
#include <pcl/features/boundary.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/features/principal_curvatures.h>
/*==================== PCL: registration ====================*/
#include <pcl/registration/icp.h>
#include <pcl/registration/icp_nl.h>
#include <pcl/registration/ndt.h>
#include <pcl/registration/transforms.h>
#include <pcl/registration/ia_ransac.h>
#include <pcl/registration/ia_fpcs.h>
#include <pcl/registration/correspondence_estimation.h>
#include <pcl/registration/correspondence_rejection_features.h>
#include <pcl/registration/correspondence_rejection_sample_consensus.h>
/*==================== PCL: filters ====================*/
#include <pcl/filters/filter.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/approximate_voxel_grid.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/filters/crop_box.h>
/*==================== PCL: sample consensus + segmentation ====================*/
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/segmentation/region_growing.h>
#include <pcl/segmentation/region_growing_rgb.h>
#include <pcl/segmentation/supervoxel_clustering.h>
#include <pcl/segmentation/lccp_segmentation.h>
#include <pcl/segmentation/extract_clusters.h>
/*==================== PCL: visualization / range image / console ====================*/
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/range_image_visualizer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/range_image/range_image.h>
#include <pcl/range_image/range_image_planar.h>
#include <pcl/console/print.h>
#include <pcl/console/time.h>
#include <pcl/console/parse.h>
/*==================== LAS + XML ====================*/
#include "lasreader.hpp"
#include "laswriter.hpp"
#include "rapidxml.hpp"
#include "rapidxml_utils.hpp"  // rapidxml::file
#include "rapidxml_print.hpp"  // rapidxml::print
/*==================== Disabled local modules ====================*/
//#include "DBSCAN_simple.h"
//#include "DBSCAN_precomp.h"
//#include "DBSCAN_kdtree.h"
//#include "../dbscan_kdtree-master/build/MinimumBoundingRectangle.h"
//#include "../dbscan_kdtree-master/build/DBSCAN.h"
//#include "../dbscan_kdtree-master/build/Sar_sift.h"
//#include "../dbscan_kdtree-master/build/match.h"

using namespace std;
using namespace cv;
using namespace Eigen;
using namespace pcl;
typedef std::map<std::string, std::vector<double>> ParamMap;
typedef pcl::PointCloud<pcl::PointXYZ> PointCloudXYZ;
typedef vector<pair<Point, Point>> PointPairs;void readXMLByFile(const char *file_name)
{static const int buf_len = 2048;static char buf[buf_len] = { 0 };try{// 2.拼接绝对路径std::string strXml = file_name;// 3.用file文件读入缓冲区rapidxml::file<> fdoc(strXml.c_str());// 4.打印从文件中读取的内容std::cout << fdoc.data();// 5.解析获取DOM实例rapidxml::xml_document<> doc;doc.parse<0>(fdoc.data());rapidxml::xml_node<>* root = doc.first_node(); // 获取根节点<ca_table>rapidxml::xml_node<>* pCePortNodeBasic = root->first_node("chunk"); // 获取节点<ce_port>if (pCePortNodeBasic) {rapidxml::xml_node<>* ptrNodeCameras = pCePortNodeBasic->first_node()->next_sibling()->next_sibling();rapidxml::xml_node<>* pCurrentPortNodeCamera = ptrNodeCameras->first_node();while (pCurrentPortNodeCamera){rapidxml::xml_attribute<>* pname = pCurrentPortNodeCamera->first_attribute("label");std:; string imageName = pname->value();cout << imageName << endl;rapidxml::xml_node<>* pTransformPortNode = pCurrentPortNodeCamera->first_node("transform");string content = pTransformPortNode->value();pCurrentPortNodeCamera = pCurrentPortNodeCamera->next_sibling();// 按照空格分隔std::vector<double> lineValues;std::stringstream line_stream1(content);line_stream1.str(content);// 按照空格分隔std::vector<double> transformValues;std::string single;while (getline(line_stream1, single, ' ')) {transformValues.push_back(atof(single.c_str()));}} }}catch (rapidxml::parse_error e){std::cout << e.what() << std::endl;}}Mat eulerAnglesToRotationMatrix(Vec3f &theta)
{// Calculate rotation about x axisMat R_x = (Mat_<double>(3, 3) <<1, 0, 0,0, cos(theta[0]), -sin(theta[0]),0, sin(theta[0]), cos(theta[0]));// Calculate rotation about y axisMat R_y = (Mat_<double>(3, 3) <<cos(theta[1]), 0, sin(theta[1]),0, 1, 0,-sin(theta[1]), 0, cos(theta[1]));// Calculate rotation about z axisMat R_z = (Mat_<double>(3, 3) <<cos(theta[2]), -sin(theta[2]), 0,sin(theta[2]), cos(theta[2]), 0,0, 0, 1);// Combined rotation matrixMat R = R_z * R_y * R_x;return R;
}
/// Rasterize an intensity point cloud onto a 2D grid (0.5 m cells) and write:
///  - groundtruths/<image_name>.txt : one line per point (x y z imgX imgY col row intensity minIntensity)
///  - soleground/<image_name>.txt   : points occluded by a higher point in the same cell
///  - projectImages/Gray_Image<image_name>.jpg : 3-channel projection (B=height, G=R=intensity)
/// @param cloud       input cloud; its coordinates are shifted IN PLACE by the offsets.
/// @param image_name  basename used for all output files.
/// @param x_off/y_off/z_off  offsets added back to restore global coordinates.
/// @param imagePoints per-point pixel coordinates; assumed same length as cloud — TODO confirm callers.
void computeProjectDense(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, string image_name, double x_off, double y_off, double z_off, std::vector<cv::Point2d> imagePoints)
{
	// Shift the cloud from local back to global coordinates (in place).
	for (int i = 0; i < cloud->size(); i++)
	{
		cloud->points[i].x = cloud->points[i].x + x_off;
		cloud->points[i].y = cloud->points[i].y + y_off;
		cloud->points[i].z = cloud->points[i].z + z_off;
	}

	// Build a 2D grid over the XY bounding box of the cloud.
	const float GridScale = 0.5;        // grid cell size
	pcl::PointXYZI minPt, maxPt;        // cloud extremes
	int width, height;                  // grid columns / rows
	const float Xscale3d = GridScale;   // cell size along x
	const float Yscale3d = GridScale;   // cell size along y
	pcl::getMinMax3D(*cloud, minPt, maxPt);
	double X_Max = maxPt.x;
	double X_Min = minPt.x;
	double Y_Max = maxPt.y;
	double Y_Min = minPt.y;
	double Z_Max = maxPt.z;
	double Z_Min = minPt.z;
	double Intensity_Max = 18000;
	double Intensity_Min = minPt.intensity;
	// Pad the box so border points never index outside the grid.
	X_Max = X_Max + GridScale * 2;
	X_Min = X_Min - GridScale * 2;
	Y_Max = Y_Max + GridScale * 2;
	Y_Min = Y_Min - GridScale * 2;
	width = int((X_Max - X_Min) / Xscale3d + 1);   // number of columns
	height = int((Y_Max - Y_Min) / Yscale3d + 1);  // number of rows

	Mat image_index(height, width, CV_8UC1);  // per-cell point counter
	Mat image(height, width, CV_8UC3);        // output projection image
	// Zero-initialise both images.
	for (int i = 0; i < image_index.rows; i++)
	{
		uchar *p = image_index.ptr<uchar>(i);
		for (int j = 0; j < image_index.cols; j++)
		{
			p[j] = 0;
		}
	}
	for (int i = 0; i < image.rows; i++)
	{
		for (int j = 0; j < image.cols; j++)
		{
			image.at<Vec3b>(i, j)[0] = 0;
			image.at<Vec3b>(i, j)[1] = 0;
			image.at<Vec3b>(i, j)[2] = 0;
		}
	}

	MatrixXd M;  // per-cell point count (density)
	M = MatrixXd::Zero(height, width);
	int row, col;

	// Intensity range used for normalisation: real minimum, clamped maximum.
	vector<double> vec_intensity;
	for (size_t Index = 0; Index < cloud->points.size(); Index++)
	{
		vec_intensity.push_back(cloud->points[Index].intensity);
	}
	sort(vec_intensity.begin(), vec_intensity.end());
	Intensity_Min = *(vec_intensity.begin());
	Intensity_Max = 20000;

	int typePrj = 1;  // 0 = max-height projection, 1 = density/occlusion projection
	int iternum = 0;
	if (typePrj == 0)
	{
		// Max-projection branch (currently disabled by typePrj == 1).
		for (size_t Index = 0; Index < cloud->points.size(); Index++)
		{
			row = int((cloud->points[Index].x - X_Min) / Xscale3d) + 1;
			col = int((cloud->points[Index].y - Y_Min) / Yscale3d) + 1;
			int z_normal = int((cloud->points[Index].z - Z_Min) / (Z_Max - Z_Min) * 255);
			int intensity_normal = int((cloud->points[Index].intensity - Intensity_Min) / (Intensity_Max - Intensity_Min) * 255);
			if (intensity_normal > 255)intensity_normal = 255;
			uchar *p = image.ptr<uchar>(row);
			// fixed: image_index is CV_8UC1, so its rows must be addressed as
			// uchar — the old ptr<float>(row) wrote past the row buffer.
			uchar *p_index = image_index.ptr<uchar>(row);
			if (M(row, col) > 0 && M(row, col) < z_normal)
			{
				M(row, col) = intensity_normal;
				p[col] = M(row, col);
				p_index[col] = Index;
			}
			else if (M(row, col) == 0)
			{
				M(row, col) = intensity_normal;
				p[col] = M(row, col);
				p_index[col] = Index;
			}
		}
	}
	else if (typePrj == 1)
	{
		std::ofstream outfile1("D:\\mydatasets\\groundtruths\\" + image_name + ".txt");
		std::ofstream outfile2("D:\\mydatasets\\soleground\\" + image_name + ".txt");
		// Pass 1: count points per cell (density).
		for (size_t Index = 0; Index < cloud->points.size(); Index++)
		{
			col = int((cloud->points[Index].x - X_Min) / Xscale3d) + 1;
			row = height - int((cloud->points[Index].y - Y_Min) / Yscale3d) - 1;
			uchar* p = image_index.ptr<uchar>(row);
			p[col] = p[col] + 1;
			M(row, col) = p[col];
		}
		double Mmin = M.minCoeff();
		double Mmax = M.maxCoeff();
		// Pass 2: write per-point records and paint the projection image.
		for (size_t Index = 0; Index < cloud->points.size(); Index++)
		{
			col = int((cloud->points[Index].x - X_Min) / Xscale3d) + 1;
			row = height - int((cloud->points[Index].y - Y_Min) / Yscale3d) - 1;
			if (outfile1.is_open()) {
				outfile1 << fixed << setprecision(12);
				outfile1 << cloud->points[Index].x << " " << cloud->points[Index].y << " " << cloud->points[Index].z << " "
					<< imagePoints[Index].x << " " << imagePoints[Index].y << " " << col << " " << row << " "
					<< cloud->points[Index].intensity << " " << Intensity_Min << endl;
			}
			else {
				std::cout << "无法打开文件!" << std::endl;
			}
			int z_normal = int((cloud->points[Index].z - Z_Min) / (Z_Max - Z_Min) * 255);
			int intensity_normal = int((cloud->points[Index].intensity - Intensity_Min) / (Intensity_Max - Intensity_Min) * 255);
			int density_normal = int((M(row, col) - Mmin) / (Mmax - Mmin) * 255);
			if (intensity_normal > 255)intensity_normal = 255;
			if (z_normal > 255)z_normal = 255;
			if (density_normal > 255)density_normal = 255;
			if (image.at<Vec3b>(row, col)[0] <= z_normal)
			{
				// This point is the highest seen so far in its cell: keep it.
				image.at<Vec3b>(row, col)[0] = z_normal;
				image.at<Vec3b>(row, col)[1] = intensity_normal;
				image.at<Vec3b>(row, col)[2] = intensity_normal;
				iternum = 0;
			}
			else
			{
				// Occluded point: record at most one per run into outfile2.
				if (iternum >= 1)continue;
				if (outfile2.is_open()) {
					outfile2 << fixed << setprecision(12);
					outfile2 << cloud->points[Index].x << " " << cloud->points[Index].y << " " << cloud->points[Index].z << " "
						<< imagePoints[Index].x << " " << imagePoints[Index].y << " " << col << " " << row << endl;
				}
				else {
					std::cout << "无法打开文件!" << std::endl;
				}
				iternum++;
			}
		}
		// fixed: outfile1.close() was commented out (the destructor would close
		// it, but close both explicitly for symmetry).
		outfile1.close();
		outfile2.close();
	}
	cout << " 格网密度计算完成 " << endl;
	// 插值 — (disabled) 3x3 neighbourhood median hole-filling over the image;
	// the original commented-out loop was removed here for readability.
	imwrite("D:\\mydatasets\\projectImages\\Gray_Image" + image_name + ".jpg", image);
	cout << "投影密度完成" << endl;
}

/// Crop `cloud` to an axis-aligned box centred at (x, y) spanning
/// [x-width, x+width] x [y-height, y+height] x [z-300, z+300].
/// @return a new cloud holding only the points inside the box.
pcl::PointCloud<pcl::PointXYZI>::Ptr cropclipper3D(pcl::PointCloud<pcl::PointXYZI>::Ptr& cloud, double x, double y, double z, double width, double height)
{
	// Box corners in homogeneous coordinates.
	Eigen::Vector4f lower(x - width, y - height, z - 300, 1);
	Eigen::Vector4f upper(x + width, y + height, z + 300, 1);

	pcl::CropBox<pcl::PointXYZI> box;
	box.setMin(lower);
	box.setMax(upper);
	box.setInputCloud(cloud);
	box.setNegative(false);  // keep the interior, discard the outside

	pcl::PointCloud<pcl::PointXYZI>::Ptr clipped_cloud(new pcl::PointCloud<pcl::PointXYZI>);
	box.filter(*clipped_cloud);
	std::cout << "filer point cloud:" << clipped_cloud->points.size() << "points" << std::endl;
	return clipped_cloud;
}int writeMatchImagePoints(const Mat& img1,const Mat& img2, const PointPairs& pointpairs,const std::string& savepath) {// 1. 读取两幅图像//Mat img1 = imread("image1.jpg");//Mat img2 = imread("image2.jpg");if (img1.empty() || img2.empty()) {cerr << "无法加载图像,请检查路径" << endl;return -1;}// 2. 创建合并图像int height = max(img1.rows, img2.rows);int width = img1.cols + img2.cols;Mat merged_img(height, width, CV_8UC3, Scalar(0, 0, 0));// 将图像复制到合并画布上img1.copyTo(merged_img(Rect(0, 0, img1.cols, img1.rows)));img2.copyTo(merged_img(Rect(img1.cols, 0, img2.cols, img2.rows)));// 3. 定义对应点对 (格式:{Point(x1,y1), Point(x2,y2)})//vector<pair<Point, Point>> point_pairs = {// {Point(50, 100), Point(300, 120)}, // 点对1// {Point(200, 80), Point(450, 90)}, // 点对2// {Point(150, 200), Point(400, 220)}, // 点对3// {Point(80, 250), Point(350, 260)} // 点对4//};// 4. 绘制点和连线int i = 0;for (const auto& pair : pointpairs) {if (i > 20)break;++i;Point pt1 = pair.first;Point pt2 = pair.second;// 调整第二幅图像点的坐标 (添加水平偏移)Point adjusted_pt2(pt2.x + img1.cols, pt2.y);// 绘制连线 (蓝色)line(merged_img, pt1, adjusted_pt2, Scalar(255, 0, 0), 2);// 绘制第一幅图像的点 (红色)circle(merged_img, pt1, 5, Scalar(0, 0, 255), -1);// 绘制第二幅图像的点 (绿色)circle(merged_img, adjusted_pt2, 5, Scalar(0, 255, 0), -1);}// 5. 显示并保存结果//imshow("Point Correspondences", merged_img);imwrite(savepath, merged_img);waitKey(0);destroyAllWindows();return 0;
}void computeProject3D_2D_Dense_XML(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud)
{const int width = 7952;const int height = 5304;const float pixelresolution = 0.07;//double x_off = 530923.32489504258;///530923.00000000000;//double y_off = 3958530.2597993412;//double z_off = 175.69202813226730;double x_off = 0.0;///530923.00000000000;double y_off = 0.0;double z_off = 0.0;const float scale = 1.0;//local坐标系到Global坐标系//cv::Mat RT_localGlobalMat(4, 4, cv::DataType<double>::type); // Intrisic matrixcv::Mat R_localGlobalMat(3, 3, cv::DataType<double>::type); // Intrisic matrix//固定值 全局的旋转参数//cv::Mat R_localGlobalMat_T(3, 3, cv::DataType<double>::type); // Intrisic matrixR_localGlobalMat.at<double>(0, 0) = 8.4328535063808485e-001;R_localGlobalMat.at<double>(1, 0) = -5.2403615256904434e-001;R_localGlobalMat.at<double>(2, 0) = -1.1939819177791421e-001;R_localGlobalMat.at<double>(0, 1) = 5.3738866832410814e-001;R_localGlobalMat.at<double>(1, 1) = 8.1832429138164420e-001;R_localGlobalMat.at<double>(2, 1) = 2.0385969020768061e-001;R_localGlobalMat.at<double>(0, 2) = -9.1234070414389183e-003;R_localGlobalMat.at<double>(1, 2) = -2.3607512561759539e-001;R_localGlobalMat.at<double>(2, 2) = 9.7169197717620015e-001;//R_localGlobalMat_T = R_localGlobalMat.t();//固定值 全局的平移参数cv::Mat T_localGlobalMat(3, 1, cv::DataType<double>::type); // Intrisic matrix//cv::Mat T_localGlobalMat_T(3, 1, cv::DataType<double>::type); // Intrisic matrixT_localGlobalMat.at<double>(0) = 5.3110107804608194e+005;T_localGlobalMat.at<double>(1) = 3.9592132436881764e+006;T_localGlobalMat.at<double>(2) = 3.9691317611246762e+002;//T_localGlobalMat_T = -R_localGlobalMat_T * T_localGlobalMat;//相机到local坐标系cv::Mat Mat_CameraLocal(4, 4, cv::DataType<double>::type); // Intrisic matrixcv::Mat Mat_CameraLocal_R(3, 3, cv::DataType<double>::type); // Intrisic matrixMat_CameraLocal_R.at<double>(0, 0) = -9.9763374348616152e-01;Mat_CameraLocal_R.at<double>(1, 0) = 2.5888134639684635e-02;Mat_CameraLocal_R.at<double>(2, 0) = -6.3692372719705820e-02;Mat_CameraLocal_R.at<double>(0, 1) = 
4.2186781874565982e-02;Mat_CameraLocal_R.at<double>(1, 1) = 9.6199885539966123e-01;Mat_CameraLocal_R.at<double>(2, 1) = -2.6977486473874623e-01;Mat_CameraLocal_R.at<double>(0, 2) = 5.4288021633286249e-02;Mat_CameraLocal_R.at<double>(1, 2) = -2.7182348444278837e-01;Mat_CameraLocal_R.at<double>(2, 2) = -9.6081465643095021e-01;cv::Mat Mat_CameraLocal_T(3, 1, cv::DataType<double>::type); // Intrisic matrixMat_CameraLocal_T.at<double>(0) = 7.4609553581479258e+00;Mat_CameraLocal_T.at<double>(1) = 7.3413396614441817e+00;Mat_CameraLocal_T.at<double>(2) = -3.4636831297553317e-01;//固定值 内参矩阵及其参数cv::Mat intrisicMat(3, 3, cv::DataType<double>::type); // Intrisic matrixintrisicMat.at<double>(0, 0) = 5567.26026351138 / scale;intrisicMat.at<double>(1, 0) = 0;intrisicMat.at<double>(2, 0) = 0;intrisicMat.at<double>(0, 1) = 0;intrisicMat.at<double>(1, 1) = 5567.26026351138 / scale;intrisicMat.at<double>(2, 1) = 0;intrisicMat.at<double>(0, 2) = (width / 2 + 2.38411623416083) / scale;//2.4999267471251629intrisicMat.at<double>(1, 2) = (height / 2 - 32.3673155075826) / scale;//31.225670155148315intrisicMat.at<double>(2, 2) = 1;cv::Mat distCoeffs(5, 1, cv::DataType<double>::type); // Distortion vectordouble k1 = -0.0457617707518695;double k2 = 0.0911130942388938;double k3 = -0.0153369695445818;double p1 = -0.00166274534179506;double p2 = 0.00277374993084083;double b1 = 2.21658355621507;double b2 = 0.235004131559968;distCoeffs.at<double>(0) = k1;distCoeffs.at<double>(1) = k2;distCoeffs.at<double>(2) = k3;distCoeffs.at<double>(3) = p1;distCoeffs.at<double>(4) = p2;try{std::string strXml = "L:/rlh/segmentation-old/dbscan_kdtree-master/dbscan_new/release/resources/\\3.xml";// 3.用file文件读入缓冲区rapidxml::file<> fdoc(strXml.c_str());// 4.打印从文件中读取的内容std::cout << fdoc.data();// 5.解析获取DOM实例rapidxml::xml_document<> doc;doc.parse<0>(fdoc.data());rapidxml::xml_node<>* root = doc.first_node(); // 获取根节点<ca_table>rapidxml::xml_node<>* pCePortNodeBasic = root->first_node("chunk"); // 获取节点<ce_port>if 
(pCePortNodeBasic) {rapidxml::xml_node<>* ptrNodeCameras = pCePortNodeBasic->first_node()->next_sibling();rapidxml::xml_node<>* pCurrentPortNodeCamera = ptrNodeCameras->first_node();while (pCurrentPortNodeCamera){rapidxml::xml_attribute<>* pname = pCurrentPortNodeCamera->first_attribute("label");string imageName = pname->value();rapidxml::xml_attribute<>* id = pCurrentPortNodeCamera->first_attribute("id");int id_num = atoi(id->value());cout << imageName << endl;rapidxml::xml_node<>* pTransformPortNode = pCurrentPortNodeCamera->first_node("transform");string content = pTransformPortNode->value();旋转//rapidxml::xml_node<>* pRotatePortNode = pCurrentPortNodeCamera->first_node("rotation_covariance");//string rotatecontent = pRotatePortNode->value();//获取中心rapidxml::xml_node<>* pCurrentPortNodeCenter = pTransformPortNode->next_sibling()->next_sibling()->next_sibling()->next_sibling();double x_center = atof(pCurrentPortNodeCenter->first_attribute("x")->value());double y_center = atof(pCurrentPortNodeCenter->first_attribute("y")->value());double z_center = atof(pCurrentPortNodeCenter->first_attribute("z")->value());pCurrentPortNodeCamera = pCurrentPortNodeCamera->next_sibling();//if (id_num<588)continue;// pcl::PointCloud<pcl::PointXYZI>::Ptr cropcloud = cropclipper3D(cloud, x_center-x_off, y_center-y_off, z_center-z_off,(width/2+100)*pixelresolution,(height/2+100)*pixelresolution);vector<double> vec_intensity;pcl::PointCloud<pcl::PointXYZI>::Ptr cropcloud(cloud);if (cropcloud->points.size() == 0)continue;std::vector<cv::Point3d> objectPoints;std::vector<cv::Point2d> imagePoints;//std::vector<cv::Point3d> outObjectPoints;pcl::PointCloud<pcl::PointXYZI>::Ptr outcloud_filtered(new pcl::PointCloud<pcl::PointXYZI>);//imagePoints.resize(cropcloud->points.size 、 for (int i = 0; i < cropcloud->points.size(); i++){vec_intensity.push_back(cropcloud->points[i].intensity);objectPoints.push_back(cv::Point3d((cropcloud->points[i].x + x_off), (cropcloud->points[i].y+y_off ), 
(cropcloud->points[i].z +z_off)));}sort(vec_intensity.begin(), vec_intensity.end());float Intensity_Min = *(vec_intensity.begin());float Intensity_Max = vec_intensity.back(); //vec_intensity[vec_intensity.size()-1];// 按照空格分隔std::vector<double> lineValues;std::stringstream line_stream1(content);line_stream1.str(content);// 按照空格分隔std::vector<double> transformValues;std::string single;while (getline(line_stream1, single, ' ')) {transformValues.push_back(atof(single.c_str()));}//更新当前图像对应的旋转参数for (int i = 0; i < 3; i++){for (int j = 0; j < 3; j++){Mat_CameraLocal_R.at<double>(i, j) = transformValues[i*4+j];}}//更新当前图像对应的平移参数Mat_CameraLocal_T.at<double>(0) = transformValues[3];Mat_CameraLocal_T.at<double>(1) = transformValues[7];Mat_CameraLocal_T.at<double>(2) = transformValues[11];//统计格网密度Mat image_index(height / scale, width / scale, CV_32FC1); //创建一个高200,宽100的灰度图bool write_ok = false;pcl::PointXYZI pointXYZI;#pragma omp parallel forfor (int i = 0; i < objectPoints.size(); i++){cout << "输入的全局坐标: (" << objectPoints[i].x << ", " << objectPoints[i].y << ", " << objectPoints[i].z << ")" << std::endl;//if (objectPoints[i].x != 531409.375 & objectPoints[i].y != 3959080.25 & objectPoints[i].z != 181.390030)continue;cv::Mat point(3, 1, cv::DataType<double>::type);point.at<double>(0) = objectPoints[i].x;point.at<double>(1) = objectPoints[i].y;point.at<double>(2) = objectPoints[i].z;cv::Mat RTTempt(3, 1, cv::DataType<double>::type);cv::Mat uvMat(3, 1, cv::DataType<double>::type);cv::Mat tolocalMat(3, 1, cv::DataType<double>::type);tolocalMat = (R_localGlobalMat.t() * (point - T_localGlobalMat)) / 2.9666228452733847e+001; //进行全局变换RTTempt = Mat_CameraLocal_R.t()*(tolocalMat - Mat_CameraLocal_T);//计算畸变系数double distortedPoint_x = RTTempt.at<double>(0) / RTTempt.at<double>(2);double distortedPoint_y = RTTempt.at<double>(1) / RTTempt.at<double>(2);double xx = RTTempt.at<double>(0);double yy = RTTempt.at<double>(1);double r = sqrt(pow(distortedPoint_x, 2) + pow(distortedPoint_y, 
2));double x_distorted = distortedPoint_x * (1 + k1 * pow(r, 2) + k2 * pow(r, 4) + k3 * pow(r, 6)) + 2 * p2*distortedPoint_x*distortedPoint_y +p1 * (pow(r, 2) + 2 * pow(distortedPoint_x, 2));double y_distorted = distortedPoint_y *(1 + k1 * pow(r, 2) + k2 * pow(r, 4) + k3 * pow(r, 6) ) + p2 * (pow(r, 2) + 2 * pow(distortedPoint_y, 2)) + 2 * p1*distortedPoint_x*distortedPoint_y;RTTempt.at<double>(0) = x_distorted;RTTempt.at<double>(1) = y_distorted;RTTempt.at<double>(2) = 1;//RTTempt.pop_back();uvMat = intrisicMat * RTTempt;cv::Point2i xy(uvMat.at<double>(0)+ distortedPoint_x * b1 + distortedPoint_y * b2, uvMat.at<double>(1)); // 调用反算函数//cv::Point3f objectPoint = compute2D_3D(xy, intrisicMat, R_localGlobalMat, T_localGlobalMat, Mat_CameraLocal_R, Mat_CameraLocal_T,// k1, k2, k3, p1, p2, scale);// 输出结果if (xy.x > 0 && xy.y > 0 && xy.x < 8000 && xy.y < 6000){cout << "计算得到的像素坐标: (" << xy.x << ", " << xy.y << std::endl;//cout << "反算得到的全局坐标: (" << objectPoint.x << ", " << objectPoint.y << ", " << objectPoint.z << ")" << std::endl;//int ccc = 0;}int ccc = 0;//if (xy.x > 0 && xy.y > 0 && xy.x < width / scale && xy.y < height / scale)//{// write_ok = true;// int intensity_normal = int((cropcloud->points[i].intensity - Intensity_Min) / (Intensity_Max - Intensity_Min) * 255);// if (intensity_normal > 255)intensity_normal = 255;// float *p = image_index.ptr<float>(xy.y);// int temp = xy.x;// p[temp] = intensity_normal;// #pragma omp critical// {imagePoints.push_back(xy);// //outObjectPoints.push_back(objectPoints[i]);// pointXYZI.x = objectPoints[i].x - x_off;// pointXYZI.y = objectPoints[i].y - y_off ;// pointXYZI.z = objectPoints[i].z - z_off ;// pointXYZI.intensity = cropcloud->points[i].intensity;// outcloud_filtered->push_back(pointXYZI);// }// //}else{// // #pragma omp critical {cropcloud->points[i].z = -999; }}}cout << "deal ok!";if (write_ok)imwrite("L:/rlh/segmentation-old/dbscan_kdtree-master/dbscan_new/release/resources/\\groundtruths\\" + imageName + ".jpg", 
image_index);// 创建直通滤波器对象//pcl::PassThrough<pcl::PointXYZI> pass;//pass.setInputCloud(cropcloud);//pass.setFilterFieldName("z"); // 设置过滤字段为z坐标//pass.setFilterLimits(-100.0, 10000.0); // 设置范围为[0.0, 1.0]// 执行滤波,结果存储在cloud_filtered对象中//pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_filtered(new pcl::PointCloud<pcl::PointXYZI>);//pass.filter(*cloud_filtered);computeProjectDense(outcloud_filtered, imageName, x_off, y_off, z_off, imagePoints);imagePoints.clear();objectPoints.clear();vec_intensity.clear();cropcloud->clear();outcloud_filtered->clear();}}}catch (rapidxml::parse_error e){std::cout << e.what() << std::endl;}}void lasToPCDIntensity()
{
	//--------------------------------
	//---------- Load LAS cloud ------
	//--------------------------------
	cout << "->正在加载las点云..." << endl;
	LASreadOpener lasrReadOpener;
	lasrReadOpener.set_file_name("D:\\lidarAndImage\\2023projectionLidar\\066067087.las");
	LASreader* lasReader = lasrReadOpener.open(false);
	size_t count = lasReader->header.number_of_point_records;
	//--------------------------------
	//---------- Create PCD cloud ----
	//--------------------------------
	pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>);
	cloud->width = count;
	cloud->height = 1;
	cloud->resize(count);
	cloud->is_dense = false;
	//--------------------------------
	//---------- Convert -------------
	//--------------------------------
	cout << "->正在执行las点云转pcd点云..." << endl;
	// Header offsets are subtracted so coordinates stay in float range.
	double xOffset = lasReader->header.x_offset;
	double yOffset = lasReader->header.y_offset;
	double zOffset = lasReader->header.z_offset;
	size_t j = 0;
	while (lasReader->read_point() && j < count)
	{
		// XYZ (offset-reduced) plus the return intensity.
		cloud->points[j].x = lasReader->point.get_x() - xOffset;
		cloud->points[j].y = lasReader->point.get_y() - yOffset;
		cloud->points[j].z = lasReader->point.get_z() - zOffset;
		cloud->points[j].intensity = lasReader->point.get_intensity();
		++j;
	}
	// fixed: the reader was never closed/freed (resource leak).
	lasReader->close();
	delete lasReader;
	//-----------------------------------
	//---------- Save converted PCD -----
	//-----------------------------------
	cout << "->正在保存转换后的pcd点云..." << endl;
	pcl::io::savePCDFileBinary("D:\\lidarAndImage\\Traindatasets\\waterCloud_new.pcd", *cloud);
}void readCloudIndex() {pcl::PCDReader reader;pcl::PointCloud<pcl::PointXYZI>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZI>), cloud_f(new pcl::PointCloud<pcl::PointXYZI>);reader.read("D:\\lidarAndImage\\waterCloud.pcd", *cloud);//读取索引,返回坐标值//std::ifstream file("D:\\lidarAndImage\\waterCloud.pcd");vector<vector<int>>vv;vector<char*> vec_name;//读取1.in.txt文件数据ifstream fin("D:\\lidarAndImage\\result\\result.txt");if (!fin.is_open()){cout << "open error!" << endl;}//将数据存入vv数组(以字符串形式)string temp;while (getline(fin, temp)){char* s = new char[temp.size() + 1];strcpy(s, temp.c_str());char* p = strtok(s, " ");vec_name.push_back(p);vector<int> words;while (p) {words.push_back(atoi(p));p = strtok(NULL, " ");}vv.push_back(words);}ofstream outfile;outfile.open("D:\\lidarAndImage\\result\\result_lidar.txt", ios::out);//打开文件for (int i = 0; i < vv.size(); i++) {double x = cloud->points[vv[i].back()].x;double y = cloud->points[vv[i].back()].y;double z = cloud->points[vv[i].back()].z;outfile << vec_name[i] << " " << vv[i][1] << " " << vv[i][2] << " " << x << " " << y << " " << z << " " << endl;//fixed << setprecision(8)是为了保留小数点后8位进行写入}outfile.close();//关闭文件,保存文件。}PointCloudXYZ::Ptr readTxtCloud(const std::string& cloud_path)
{
	// Destination cloud (XYZ only; adjust the point type if the file has more fields).
	PointCloudXYZ::Ptr cloud(new PointCloudXYZ);

	std::ifstream file(cloud_path);
	if (!file.is_open()) {
		std::cerr << "Error: Failed to open file!" << std::endl;
		return cloud;
	}

	// One point per line: "x y z"; extra fields on a line are ignored.
	std::string line;
	while (std::getline(file, line)) {
		// Skip blank lines and '#' comment lines.
		if (line.empty() || line[0] == '#') continue;
		std::istringstream fields(line);
		pcl::PointXYZ pt;
		if (fields >> pt.x >> pt.y >> pt.z) {
			cloud->push_back(pt);
		} else {
			std::cerr << "Warning: Failed to parse line: " << line << std::endl;
		}
	}
	file.close();

	// Mark as an unorganized (1-row) dense cloud.
	cloud->width = cloud->size();
	cloud->height = 1;
	cloud->is_dense = true;

	std::cout << "Loaded " << cloud->size() << " points" << std::endl;
	return cloud;
}//求取点云中的交集
/// Return the points of cloud1 that also appear in cloud2 (within a tiny
/// radius), preserving cloud1's original point order.
/// @param cloud1 candidate points (order of matches is preserved).
/// @param cloud2 reference set, indexed with a kd-tree.
PointCloudXYZ::Ptr set_CloudIntersection(PointCloudXYZ::Ptr cloud1, PointCloudXYZ::Ptr cloud2)
{
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	kdtree->setInputCloud(cloud2);
	std::vector<int> indices;   // indices of cloud1 points found in cloud2
	float radius = 0.0001;      // "same point" tolerance
	double startTime, endTime;  // fixed: omp_get_wtime() returns double, not float
	startTime = omp_get_wtime();
#pragma omp parallel for
	// fixed: signed loop variable — MSVC OpenMP 2.0 rejects size_t induction vars.
	for (long long i = 0; i < (long long)cloud1->size(); i++)
	{
		// fixed: the radiusSearch output buffers were shared across threads
		// (a data race); they must be thread-private scratch.
		std::vector<int> pointIdxRadiusSearch;
		std::vector<float> pointRadiusSquaredDistance;
		if (kdtree->radiusSearch(cloud1->points[i], radius, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0)
		{
			// fixed: unsynchronized push_back was a data race.
#pragma omp critical
			indices.push_back((int)i);
		}
	}
	endTime = omp_get_wtime();
	printf("指定多个线程,执行时间: %f\n", endTime - startTime);
	// Emit matches in cloud1 order; sort + binary_search replaces the old
	// O(n*m) std::find membership test per point.
	std::sort(indices.begin(), indices.end());
	PointCloudXYZ::Ptr cloud3(new PointCloudXYZ);
	for (size_t i = 0; i < cloud1->size(); i++)
	{
		if (std::binary_search(indices.begin(), indices.end(), (int)i))
			cloud3->push_back(cloud1->points[i]);
	}
	std::cout << "cloud1 has " << cloud1->size() << " points" << std::endl;
	std::cout << "cloud2 has " << cloud2->size() << " points" << std::endl;
	std::cout << "cloud3 has " << cloud3->size() << " points" << std::endl;
	return cloud3;
}//读取参数文件
// Parses the camera XML (Metashape-style: root -> <chunk> -> ... -> camera
// nodes) and returns a map from image label to its 4x4 transform, stored as
// whitespace-separated row-major values.
ParamMap readXmlParameter(std::string path)
{
	ParamMap labelParamMap;
	// Load the file into rapidxml's buffer and parse the DOM in place.
	rapidxml::file<> fdoc(path.c_str());
	rapidxml::xml_document<> doc;
	doc.parse<0>(fdoc.data());

	// Walk down to the camera list; every step is null-checked — the original
	// dereferenced these unchecked and crashed on unexpected layouts.
	rapidxml::xml_node<>* root = doc.first_node();
	if (!root) return labelParamMap;
	rapidxml::xml_node<>* chunk = root->first_node("chunk");
	if (!chunk) return labelParamMap;
	rapidxml::xml_node<>* firstChild = chunk->first_node();
	if (!firstChild) return labelParamMap;
	// The camera container is the second child of <chunk> in this layout
	// — TODO confirm against the actual XML schema.
	rapidxml::xml_node<>* cameras = firstChild->next_sibling();
	if (!cameras) return labelParamMap;

	for (rapidxml::xml_node<>* camera = cameras->first_node(); camera; camera = camera->next_sibling())
	{
		// Skip nodes missing a label attribute or a <transform> element
		// instead of crashing on a null pointer.
		rapidxml::xml_attribute<>* pname = camera->first_attribute("label");
		rapidxml::xml_node<>* transformNode = camera->first_node("transform");
		if (!pname || !transformNode)
			continue;
		string imageName = pname->value();
		cout << imageName << endl;

		// Split the transform text on single spaces into doubles.
		std::vector<double> transformValues;
		std::stringstream line_stream1(transformNode->value());
		std::string single;
		while (getline(line_stream1, single, ' ')) {
			transformValues.push_back(atof(single.c_str()));
		}
		labelParamMap.insert(std::make_pair(imageName, transformValues));
	}
	return labelParamMap;
}
// True when both pixel coordinates lie inside their respective image bounds
// (note: right/bottom edges are treated as inclusive here).
bool isValid(cv::Point2d pt1, cv::Point2d pt2,int width1,int height1,int width2,int height2)
{
	// Each point must be non-negative and within its image's (inclusive) extents.
	const bool insideFirst = pt1.x >= 0 && pt1.y >= 0 && pt1.x <= width1 && pt1.y <= height1;
	const bool insideSecond = pt2.x >= 0 && pt2.y >= 0 && pt2.x <= width2 && pt2.y <= height2;
	return insideFirst && insideSecond;
} // Next: compute the pixel (u, v) of an object point in a given image.
// Projects a global-frame object point into the image identified by `label`:
// global -> chunk-local (R_localGlobal, T_localGlobal, fixed chunk scale) ->
// camera frame (per-image transform from xmlMap) -> distortion -> intrinsics.
// Returns (0,0) when `label` has no entry in xmlMap.
cv::Point2d calculateUV(const pcl::PointXYZ& objectPoint,const std::string& label,const ParamMap&xmlMap,const cv::Mat& intrisicMat,const cv::Mat& R_localGlobalMat,const cv::Mat& T_localGlobalMat,cv::Mat Mat_CameraLocal_R,cv::Mat Mat_CameraLocal_T,double k1, double k2, double k3,double p1, double p2,double b1, double b2,double scale)
{
	cv::Point2d pt_out;
	// 1. Look up this image's 4x4 transform (row-major values) in the map.
	auto transformValues1_find = xmlMap.find(label);
	if (transformValues1_find == xmlMap.end()) {
		cout << "xml can not find label:" << label << endl;
		return pt_out;  // default (0,0) signals "label not found"
	}
	const std::vector<double>& transformValues1 = transformValues1_find->second;

	// 2. Fill LOCAL copies of the camera R|T. The original wrote into the
	//    pass-by-value cv::Mat parameters, but cv::Mat copies are shallow, so
	//    that mutated the caller's matrices and raced when this function was
	//    called from an OpenMP loop.
	cv::Mat localR = Mat_CameraLocal_R.clone();
	cv::Mat localT = Mat_CameraLocal_T.clone();
	for (int i = 0; i < 3; i++)
	{
		for (int j = 0; j < 3; j++)
		{
			localR.at<double>(i, j) = transformValues1[i * 4 + j];
		}
	}
	localT.at<double>(0) = transformValues1[3];
	localT.at<double>(1) = transformValues1[7];
	localT.at<double>(2) = transformValues1[11];

	// 3. Global -> chunk-local (fixed chunk scale) -> camera frame.
	cv::Mat point(3, 1, cv::DataType<double>::type);
	point.at<double>(0) = objectPoint.x;
	point.at<double>(1) = objectPoint.y;
	point.at<double>(2) = objectPoint.z;
	cv::Mat tolocalMat = (R_localGlobalMat.t() * (point - T_localGlobalMat)) / 2.9666228452733847e+001;
	cv::Mat RTTempt = localR.t() * (tolocalMat - localT);

	// 4. Perspective divide, then radial (k1..k3) and tangential (p1/p2)
	//    distortion in the source's own term convention. Powers of r are
	//    built by multiplication instead of repeated pow() calls.
	const double xn = RTTempt.at<double>(0) / RTTempt.at<double>(2);
	const double yn = RTTempt.at<double>(1) / RTTempt.at<double>(2);
	const double r2 = xn * xn + yn * yn;
	const double r4 = r2 * r2;
	const double r6 = r4 * r2;
	const double radial = 1 + k1 * r2 + k2 * r4 + k3 * r6;
	const double x_distorted = xn * radial + 2 * p2 * xn * yn + p1 * (r2 + 2 * xn * xn);
	const double y_distorted = yn * radial + p2 * (r2 + 2 * yn * yn) + 2 * p1 * xn * yn;
	RTTempt.at<double>(0) = x_distorted;
	RTTempt.at<double>(1) = y_distorted;
	RTTempt.at<double>(2) = 1;

	// 5. Apply intrinsics plus the affinity/skew terms b1/b2.
	cv::Mat uvMat = intrisicMat * RTTempt;
	// NOTE(review): constructing a Point2i truncates to whole pixels even
	// though the declared return type is Point2d — kept to preserve the
	// existing output; switch to Point2d/cvRound if sub-pixel UVs are wanted.
	cv::Point2i xy(uvMat.at<double>(0) + xn * b1 + yn * b2, uvMat.at<double>(1));
	return xy;
}
/*
根据点云和图像 计算图像的像素对
1. 输入: 两张影像的label 参数文件data_path: 数据输入路径label1: 第一张影像的标签label2:第二张影像的标签xmlMap:参数文件读取出的字典savepath:像素对文件保存路径
2. 输出: 两张影像中的像素对应关系
过程:1. 根据label获得两张影像对应的点云数据读取点云2. 计算点云中重叠部分3. 利用重叠点云计算在两张影像中的对应坐标 输出点云坐标、影像1坐标、影像2坐标
*/
void matchImagePoints(const std::string& data_path, const std::string& label1, const std::string& label2, const ParamMap& xmlMap, const std::string& savepath)
{std::string cloud1_path = data_path + "/" + label1 + ".txt";std::string cloud2_path = data_path + "/" + label2 + ".txt";std::string img1_path = data_path + "/" + label1 + ".jpg";std::string img2_path = data_path + "/" + label2 + ".jpg";//1. 获取点云文件读取点云std::cout << "1. 获取点云文件读取点云" << std::endl;pcl::PointCloud<pcl::PointXYZ>::Ptr cloud1 = readTxtCloud(cloud1_path);pcl::PointCloud<pcl::PointXYZ>::Ptr cloud2 = readTxtCloud(cloud2_path);//2. 获取点云中相交部分std::cout << "2. 获取点云中相交部分" << std::endl;pcl::PointCloud<pcl::PointXYZ>::Ptr cloudIntersection = set_CloudIntersection(cloud1, cloud2);//3. 获取两张图像的宽高std::cout << "3. 获取两张图像的宽高" << std::endl;Mat image1 = imread(img1_path);if (image1.empty()) {cout << "无法打开或找到图像" << endl;return ;}int width1 = image1.cols;int height1 = image1.rows;Mat image2 = imread(img2_path);if (image2.empty()) {cout << "无法打开或找到图像" << endl;return;}int width2 = image2.cols;int height2 = image2.rows;//4. 设置参数std::cout << "4. 设置参数" << std::endl;const float scale = 1.0;//local坐标系到Global坐标系//cv::Mat RT_localGlobalMat(4, 4, cv::DataType<double>::type); // Intrisic matrixcv::Mat R_localGlobalMat(3, 3, cv::DataType<double>::type); // Intrisic matrix//固定值 全局的旋转参数//cv::Mat R_localGlobalMat_T(3, 3, cv::DataType<double>::type); // Intrisic matrixR_localGlobalMat.at<double>(0, 0) = 8.4328535063808485e-001;R_localGlobalMat.at<double>(1, 0) = -5.2403615256904434e-001;R_localGlobalMat.at<double>(2, 0) = -1.1939819177791421e-001;R_localGlobalMat.at<double>(0, 1) = 5.3738866832410814e-001;R_localGlobalMat.at<double>(1, 1) = 8.1832429138164420e-001;R_localGlobalMat.at<double>(2, 1) = 2.0385969020768061e-001;R_localGlobalMat.at<double>(0, 2) = -9.1234070414389183e-003;R_localGlobalMat.at<double>(1, 2) = -2.3607512561759539e-001;R_localGlobalMat.at<double>(2, 2) = 9.7169197717620015e-001;//R_localGlobalMat_T = R_localGlobalMat.t();//固定值 全局的平移参数cv::Mat T_localGlobalMat(3, 1, cv::DataType<double>::type); // Intrisic matrix//cv::Mat T_localGlobalMat_T(3, 1, 
cv::DataType<double>::type); // Intrisic matrixT_localGlobalMat.at<double>(0) = 5.3110107804608194e+005;T_localGlobalMat.at<double>(1) = 3.9592132436881764e+006;T_localGlobalMat.at<double>(2) = 3.9691317611246762e+002;//T_localGlobalMat_T = -R_localGlobalMat_T * T_localGlobalMat;//相机到local坐标系cv::Mat Mat_CameraLocal(4, 4, cv::DataType<double>::type); // Intrisic matrixcv::Mat Mat_CameraLocal_R(3, 3, cv::DataType<double>::type); // Intrisic matrixMat_CameraLocal_R.at<double>(0, 0) = -9.9763374348616152e-01;Mat_CameraLocal_R.at<double>(1, 0) = 2.5888134639684635e-02;Mat_CameraLocal_R.at<double>(2, 0) = -6.3692372719705820e-02;Mat_CameraLocal_R.at<double>(0, 1) = 4.2186781874565982e-02;Mat_CameraLocal_R.at<double>(1, 1) = 9.6199885539966123e-01;Mat_CameraLocal_R.at<double>(2, 1) = -2.6977486473874623e-01;Mat_CameraLocal_R.at<double>(0, 2) = 5.4288021633286249e-02;Mat_CameraLocal_R.at<double>(1, 2) = -2.7182348444278837e-01;Mat_CameraLocal_R.at<double>(2, 2) = -9.6081465643095021e-01;cv::Mat Mat_CameraLocal_T(3, 1, cv::DataType<double>::type); // Intrisic matrixMat_CameraLocal_T.at<double>(0) = 7.4609553581479258e+00;Mat_CameraLocal_T.at<double>(1) = 7.3413396614441817e+00;Mat_CameraLocal_T.at<double>(2) = -3.4636831297553317e-01;//固定值 内参矩阵及其参数int width = 7952;int height = 5304;cv::Mat intrisicMat(3, 3, cv::DataType<double>::type); // Intrisic matrixintrisicMat.at<double>(0, 0) = 5567.26026351138 / scale;intrisicMat.at<double>(1, 0) = 0;intrisicMat.at<double>(2, 0) = 0;intrisicMat.at<double>(0, 1) = 0;intrisicMat.at<double>(1, 1) = 5567.26026351138 / scale;intrisicMat.at<double>(2, 1) = 0;intrisicMat.at<double>(0, 2) = (width / 2 + 2.38411623416083) / scale;//2.4999267471251629intrisicMat.at<double>(1, 2) = (height / 2 - 32.3673155075826) / scale;//31.225670155148315intrisicMat.at<double>(2, 2) = 1;cv::Mat distCoeffs(5, 1, cv::DataType<double>::type); // Distortion vectordouble k1 = -0.0457617707518695;double k2 = 0.0911130942388938;double k3 = 
-0.0153369695445818;double p1 = -0.00166274534179506;double p2 = 0.00277374993084083;double b1 = 2.21658355621507;double b2 = 0.235004131559968;distCoeffs.at<double>(0) = k1;distCoeffs.at<double>(1) = k2;distCoeffs.at<double>(2) = k3;distCoeffs.at<double>(3) = p1;distCoeffs.at<double>(4) = p2;//5. 打开文件等待写入std::ofstream outfile(savepath);std::cout << "5. 打开文件等待写入" << std::endl;//6. 计算UV坐标写入文件std::cout << "6. 计算UV坐标写入文件" << std::endl;PointPairs pointpairs;float startTime, endTime;startTime = omp_get_wtime();
#pragma omp parallel forfor (int i = 0; i < cloudIntersection->points.size(); i++){//if (i > 20)//{// break; //}pcl::PointXYZ pt = cloudIntersection->points[i];cv::Point2d UV1 = calculateUV(pt, label1, xmlMap, intrisicMat, R_localGlobalMat, T_localGlobalMat,Mat_CameraLocal_R, Mat_CameraLocal_T, k1, k2, k3, p1, p2, b1, b2, scale);cv::Point2d UV2 = calculateUV(pt, label2, xmlMap, intrisicMat, R_localGlobalMat, T_localGlobalMat,Mat_CameraLocal_R, Mat_CameraLocal_T, k1, k2, k3, p1, p2, b1, b2, scale);//判断是否在范围内bool isvalid = isValid(UV1, UV2, width1, height1, width2, height2);if (isvalid){pointpairs.push_back(std::make_pair(UV1,UV2));//输出到文件中if (outfile.is_open()) {// 写入数组到文本文件//outfile << fixed << setprecision(12);outfile << fixed << setprecision(6) << pt.x << " " << pt.y << " " << pt.z << " " << fixed << setprecision(0) << UV1.x << " " <<UV1.y << " " << UV2.x << " " <<UV2.y << endl;std::cout << fixed << setprecision(6) << pt.x << " " << pt.y << " " << pt.z << " " << fixed << setprecision(0) << UV1.x << " " <<UV1.y << " " << UV2.x << " " << UV2.y << endl;int ccc = 0;}else {std::cout << "无法打开文件!" << std::endl;}}int ccc = 0;}endTime = omp_get_wtime();printf("指定多个线程,执行时间: %f\n", endTime - startTime);//7.关闭文件 打印输出std::cout << "7.关闭文件 打印输出" << std::endl;outfile.close();cout << "save path to " << savepath << endl;std::stringstream ss;ss << data_path << "/" << label1 << "_" << label2 << "_matchimg.jpg";std::string saveImgpath = ss.str();writeMatchImagePoints(image1,image2,pointpairs, saveImgpath);cout << "save match image to " << saveImgpath << endl;cloud1->clear();cloud2->clear();cloudIntersection->clear();}int main()
{
	std::cout << "Hello World!\n";
	// Parse the camera-parameter XML once; the map is reused for both images.
	const std::string xmlpath = "L:/rlh/segmentation-old/dbscan_kdtree-master/dbscan_new/release/resources/3.xml";
	ParamMap labelParamMap = readXmlParameter(xmlpath);

	// The image pair to match.
	const std::string data_path = "L:/rlh/segmentation-old/dbscan_kdtree-master/dbscan_new/release/resources/Images";
	const std::string label1 = "01VbFBGJ000000165";
	const std::string label2 = "01VbFBGJ000000168";

	// Correspondences go to "<label1>_<label2>.txt" next to the images.
	const std::string savepath = data_path + "/" + label1 + "_" + label2 + ".txt";

	// Run the correspondence generation.
	matchImagePoints(data_path, label1, label2, labelParamMap, savepath);
	return 0;
}