Overview
An analysis of the classic ICP code in the PCL point cloud library:
- Introduction to the ICP algorithm
- Analysis of the PCL source code
Introduction to the ICP algorithm
ICP (Iterative Closest Point) is the classic point cloud registration algorithm. Its core idea is: find corresponding point pairs between the source and target clouds, build a rotation-and-translation matrix from those pairs, use that matrix to transform the source cloud into the target cloud's coordinate frame, and then evaluate an error function between the transformed source cloud and the target cloud. If the error is above a threshold, the above steps are repeated until the given error requirement is met.
ICP uses a least-squares estimate to compute the transformation matrix. The principle is simple and the accuracy is good, but the iterative computation makes the algorithm fairly slow. In addition, ICP is sensitive to the initial relative position of the clouds to be registered; a poor initial position can trap the algorithm in a local optimum.
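For reference, the least-squares objective that each ICP iteration minimizes can be written as follows; this is the standard textbook formulation, not something quoted from the PCL source:

$$E(\mathbf{R}, \mathbf{t}) = \frac{1}{N} \sum_{i=1}^{N} \left\| \mathbf{q}_i - (\mathbf{R}\,\mathbf{p}_i + \mathbf{t}) \right\|^2$$

where $\mathbf{p}_i$ is a source point, $\mathbf{q}_i$ is its corresponding closest target point, and $\mathbf{R}$, $\mathbf{t}$ are the rotation and translation estimated in that iteration.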
Analysis of the PCL source code
I went through the PCL source code and added comments in most places, though they may not be entirely correct.
- First, the main program that drives ICP:
#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
int
main (int argc, char** argv)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out (new pcl::PointCloud<pcl::PointXYZ>);
// Fill in the CloudIn data
cloud_in->width = 5;
cloud_in->height = 1;
cloud_in->is_dense = false;
cloud_in->points.resize (cloud_in->width * cloud_in->height);
for (size_t i = 0; i < cloud_in->points.size (); ++i)
{
cloud_in->points[i].x = 1024 * rand () / (RAND_MAX + 1.0f);
cloud_in->points[i].y = 1024 * rand () / (RAND_MAX + 1.0f);
cloud_in->points[i].z = 1024 * rand () / (RAND_MAX + 1.0f);
}
std::cout << "Saved " << cloud_in->points.size () << " data points to input:"
<< std::endl;
for (size_t i = 0; i < cloud_in->points.size (); ++i) std::cout << "    " <<
cloud_in->points[i].x << " " << cloud_in->points[i].y << " " <<
cloud_in->points[i].z << std::endl;
*cloud_out = *cloud_in;
std::cout << "size:" << cloud_out->points.size() << std::endl;
for (size_t i = 0; i < cloud_in->points.size (); ++i)
cloud_out->points[i].x = cloud_in->points[i].x + 0.7f;
std::cout << "Transformed " << cloud_in->points.size () << " data points:"
<< std::endl;
for (size_t i = 0; i < cloud_out->points.size (); ++i)
std::cout << "    " << cloud_out->points[i].x << " " <<
cloud_out->points[i].y << " " << cloud_out->points[i].z << std::endl;
pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
icp.setInputCloud(cloud_in);
icp.setInputTarget(cloud_out);
pcl::PointCloud<pcl::PointXYZ> Final;
icp.align(Final);
std::cout << "has converged:" << icp.hasConverged() << " score: " <<
icp.getFitnessScore() << std::endl;
std::cout << icp.getFinalTransformation() << std::endl;
return (0);
}
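Before digging into the internals, here is a minimal sketch of how the main ICP parameters are usually configured before calling align(). The values are arbitrary examples, and note that newer PCL releases prefer setInputSource() over the deprecated setInputCloud() used in the tutorial above:

pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
icp.setInputSource (cloud_in);              // source cloud that will be moved
icp.setInputTarget (cloud_out);             // fixed target cloud
icp.setMaxCorrespondenceDistance (0.05);    // ignore correspondences farther than 5 cm
icp.setMaximumIterations (50);              // upper bound on the do-while loop
icp.setTransformationEpsilon (1e-8);        // convergence threshold on the transform change
icp.setEuclideanFitnessEpsilon (1e-6);      // convergence threshold on the MSE
pcl::PointCloud<pcl::PointXYZ> Final;
icp.align (Final);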
The main functionality is implemented in the align() function, which is located in
registration/include/pcl/registration/impl/registration.hpp. The code is as follows.
- The align() function
// align() ends up calling this overload, which does the real work
template <typename PointSource, typename PointTarget, typename Scalar> inline void
pcl::Registration<PointSource, PointTarget, Scalar>::align (PointCloudSource &output, const Matrix4& guess)
{
// Set up the indices of the input cloud; defined in common/include/pcl/impl/pcl_base.hpp
if (!initCompute ())
return;
// Resize the output dataset
// If the output size differs from the number of input indices, resize it to match
if (output.points.size () != indices_->size ())
output.points.resize (indices_->size ());
// Copy the header
output.header = input_->header;
// Check if the output will be computed for all points or only a subset
// I do not quite get this branch; it feels like the two sizes should always be equal?
if (indices_->size () != input_->points.size ())
{
output.width = static_cast<uint32_t> (indices_->size ());
output.height = 1;
}
else
{
output.width = static_cast<uint32_t> (input_->width);
output.height = input_->height;
}
output.is_dense = input_->is_dense;
// Copy the point data to output
// Here output is the 'Final' cloud, i.e. the cloud that will eventually be transformed from the input, not the matching target cloud
// Since it has not been initialized yet, the input points are simply copied over
for (size_t i = 0; i < indices_->size (); ++i)
output.points[i] = input_->points[(*indices_)[i]];
// Set the internal point representation of choice unless otherwise noted
if (point_representation_ && !force_no_recompute_)
tree_->setPointRepresentation (point_representation_);
// Perform the actual transformation computation
converged_ = false;
final_transformation_ = transformation_ = previous_transformation_ = Matrix4::Identity ();
// Right before we estimate the transformation, we set all the point.data[3] values to 1 to aid the rigid
// transformation
// Because the coordinates are homogeneous, the fourth element is 1 and the first three are x, y, z
for (size_t i = 0; i < indices_->size (); ++i)
output.points[i].data[3] = 1.0;
// Implemented in icp.hpp; the function is overloaded, so make sure to find the right one
// ICP variants are changed mostly here: modify the .h/.hpp files and the main computeTransformation() function; everything before this is just preparation and matters little
computeTransformation (output, guess);
// This function merely returns the boolean value true
deinitCompute ();
}
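The point.data[3] = 1.0 assignment above is just the homogeneous-coordinate trick: with the fourth component set to 1, a single 4x4 matrix multiplication applies both the rotation and the translation. A small Eigen sketch of my own (not PCL code) to illustrate:

#include <Eigen/Geometry>

Eigen::Matrix4f T = Eigen::Matrix4f::Identity ();
T.block<3, 3> (0, 0) = Eigen::AngleAxisf (0.1f, Eigen::Vector3f::UnitZ ()).toRotationMatrix ();
T.block<3, 1> (0, 3) = Eigen::Vector3f (0.7f, 0.0f, 0.0f);   // translation part
Eigen::Vector4f pt (1.0f, 2.0f, 3.0f, 1.0f);                 // x, y, z, 1
Eigen::Vector4f pt_t = T * pt;                               // rotated and translated in a single multiply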
The most important function called inside align() is computeTransformation (output, guess), which lives in registration/include/pcl/registration/impl/icp.hpp.
- The computeTransformation() function
///
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::IterativeClosestPoint<PointSource, PointTarget, Scalar>::computeTransformation (
PointCloudSource &output, const Matrix4 &guess)
{
// Point cloud containing the correspondences of each point in <input, indices>
// input_transformed is the input cloud after one application of the transform
PointCloudSourcePtr input_transformed (new PointCloudSource);
nr_iterations_ = 0;
converged_ = false;
// Initialise final transformation to the guessed one
// With no guess supplied this is simply the identity matrix
final_transformation_ = guess;
// If the guessed transformation is non identity
if (guess != Matrix4::Identity ())
{
input_transformed->resize (input_->size ());
// Apply guessed transformation prior to search for neighbours
// transformCloud() is defined around line 48 of icp.hpp
transformCloud (*input_, *input_transformed, guess);
}
else
// Otherwise just copy; nothing has been transformed yet, input_transformed is still the original input
*input_transformed = *input_;
transformation_ = Matrix4::Identity ();
// Make blobs if necessary
// Determine whether the correspondence estimation/rejection steps below need the clouds as binary PCLPointCloud2 blobs
determineRequiredBlobData ();
PCLPointCloud2::Ptr target_blob (new PCLPointCloud2);
if (need_target_blob_)
// Convert the target to the binary PCLPointCloud2 representation
pcl::toPCLPointCloud2 (*target_, *target_blob);
// Pass in the default target for the Correspondence Estimation/Rejection code
correspondence_estimation_->setInputTarget (target_);
if (correspondence_estimation_->requiresTargetNormals ())
correspondence_estimation_->setTargetNormals (target_blob);
// Correspondence Rejectors need a binary blob
for (size_t i = 0; i < correspondence_rejectors_.size (); ++i)
{
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
if (rej->requiresTargetPoints ())
rej->setTargetPoints (target_blob);
if (rej->requiresTargetNormals () && target_has_normals_)
rej->setTargetNormals (target_blob);
}
// MSE = mean squared error; this block sets the convergence-related parameters for the iteration
convergence_criteria_->setMaximumIterations (max_iterations_);
convergence_criteria_->setRelativeMSE (euclidean_fitness_epsilon_);
convergence_criteria_->setTranslationThreshold (transformation_epsilon_);
if (transformation_rotation_epsilon_ > 0)
convergence_criteria_->setRotationThreshold (transformation_rotation_epsilon_);
else
convergence_criteria_->setRotationThreshold (1.0 - transformation_epsilon_);
// Repeat until convergence
// The body of the method is a do-while loop: closest-point search, rejection of bad correspondences, and the convergence test all happen here
// correspondence_estimation_, correspondence_rejectors_ and convergence_criteria_
// are responsible for finding closest points, rejecting bad correspondences, and the convergence criterion, respectively
// Since this is a do-while loop, the convergence criterion is what breaks out of it
// transformation_estimation_ is the concrete algorithm that solves the ICP step
do
{
// Get blob data if needed
PCLPointCloud2::Ptr input_transformed_blob;
if (need_source_blob_)
{
input_transformed_blob.reset (new PCLPointCloud2);
toPCLPointCloud2 (*input_transformed, *input_transformed_blob);
}
// Save the previously estimated transformation
// Up to this point (before the first iteration) this has always been the identity matrix
previous_transformation_ = transformation_;
// Set the source each iteration, to ensure the dirty flag is updated
correspondence_estimation_->setInputSource (input_transformed);
if (correspondence_estimation_->requiresSourceNormals ())
correspondence_estimation_->setSourceNormals (input_transformed_blob);
// Estimate correspondences
// Find correspondences for the current transformed cloud
// use_reciprocal_correspondence_ requires the source-to-target and target-to-source matches to agree (reciprocal correspondences)
if (use_reciprocal_correspondence_)
// determineReciprocalCorrespondences() is in correspondence_estimation.hpp; corr_dist_threshold_ is the maximum allowed distance
correspondence_estimation_->determineReciprocalCorrespondences (*correspondences_, corr_dist_threshold_);
else
correspondence_estimation_->determineCorrespondences (*correspondences_, corr_dist_threshold_);
//if (correspondence_rejectors_.empty ())
// Initialize temp_correspondences from the already-matched correspondences_; this is just a temporary dynamically allocated copy
CorrespondencesPtr temp_correspondences (new Correspondences (*correspondences_));
for (size_t i = 0; i < correspondence_rejectors_.size (); ++i)
{
registration::CorrespondenceRejector::Ptr& rej = correspondence_rejectors_[i];
PCL_DEBUG ("Applying a correspondence rejector method: %s.\n", rej->getClassName ().c_str ());
if (rej->requiresSourcePoints ())
rej->setSourcePoints (input_transformed_blob);
if (rej->requiresSourceNormals () && source_has_normals_)
rej->setSourceNormals (input_transformed_blob);
rej->setInputCorrespondences (temp_correspondences);
rej->getCorrespondences (*correspondences_);
// Modify input for the next iteration
if (i < correspondence_rejectors_.size () - 1)
*temp_correspondences = *correspondences_;
}
size_t cnt = correspondences_->size ();
// Check whether we have enough correspondences
if (static_cast<int> (cnt) < min_number_correspondences_)
{
PCL_ERROR ("[pcl::%s::computeTransformation] Not enough correspondences found. Relax your threshold parameters.\n", getClassName ().c_str ());
convergence_criteria_->setConvergenceState(pcl::registration::DefaultConvergenceCriteria<Scalar>::CONVERGENCE_CRITERIA_NO_CORRESPONDENCES);
converged_ = false;
break;
}
// After the correspondence estimation above, this is the other key step; transformation_estimation_ is the concrete algorithm that solves the ICP step
// Estimate the transform
// See the estimateRigidTransformation() method of the TransformationEstimationSVD class in transformation_estimation_svd.hpp
// target_ is the final target cloud and stays fixed during the iterations, while input_transformed keeps being updated until it coincides with the target
transformation_estimation_->estimateRigidTransformation (*input_transformed, *target_, *correspondences_, transformation_);
// Tranform the data
transformCloud (*input_transformed, *input_transformed, transformation_);
// Obtain the final transformation
final_transformation_ = transformation_ * final_transformation_;
++nr_iterations_;
// Update the vizualization of icp convergence
//if (update_visualizer_ != 0)
//  update_visualizer_(output, source_indices_good, *target_, target_indices_good );
converged_ = static_cast<bool> ((*convergence_criteria_));
}
while (!converged_);
// Transform the input cloud using the final transformation
PCL_DEBUG ("Transformation is:\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n\t%5f\t%5f\t%5f\t%5f\n",
final_transformation_ (0, 0), final_transformation_ (0, 1), final_transformation_ (0, 2), final_transformation_ (0, 3),
final_transformation_ (1, 0), final_transformation_ (1, 1), final_transformation_ (1, 2), final_transformation_ (1, 3),
final_transformation_ (2, 0), final_transformation_ (2, 1), final_transformation_ (2, 2), final_transformation_ (2, 3),
final_transformation_ (3, 0), final_transformation_ (3, 1), final_transformation_ (3, 2), final_transformation_ (3, 3));
// Copy all the values
output = *input_;
// Transform the XYZ + normals
// First copy input_ into output, then overwrite it with the transformed points; at this point the algorithm is complete
transformCloud (*input_, output, final_transformation_);
}
The main functions used inside computeTransformation() are transformCloud(), determineCorrespondences() and estimateRigidTransformation().
transformCloud() is defined in the same icp.hpp file.
determineCorrespondences() is in registration/include/pcl/registration/impl/correspondence_estimation.hpp, and estimateRigidTransformation() is in registration/include/pcl/registration/impl/transformation_estimation_svd.hpp.
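One detail from the iteration loop above that is worth a second look is final_transformation_ = transformation_ * final_transformation_: every per-iteration increment is left-multiplied onto the accumulated transform, so the newest increment is applied last. A tiny sketch of my own showing that composition order:

#include <Eigen/Core>

Eigen::Matrix4f accumulated = Eigen::Matrix4f::Identity ();
for (int iter = 0; iter < 3; ++iter)
{
  Eigen::Matrix4f increment = Eigen::Matrix4f::Identity ();
  increment (0, 3) = 0.1f;               // pretend each iteration finds a 0.1 translation along x
  accumulated = increment * accumulated; // same composition order as in computeTransformation()
}
// accumulated (0, 3) is now 0.3: the three increments have stacked up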
- The transformCloud() function
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::IterativeClosestPoint<PointSource, PointTarget, Scalar>::transformCloud (
const PointCloudSource &input,
PointCloudSource &output,
const Matrix4 &transform)
{
// On the first call here, input and output still hold the same values;
// only from the second iteration on does this behave as the normal update step
Eigen::Vector4f pt (0.0f, 0.0f, 0.0f, 1.0f), pt_t;
Eigen::Matrix4f tr = transform.template cast<float> ();
// XYZ is ALWAYS present due to the templatization, so we only have to check for normals
if (source_has_normals_)
{
Eigen::Vector3f nt, nt_t;
Eigen::Matrix3f rot = tr.block<3, 3> (0, 0);
for (size_t i = 0; i < input.size (); ++i)
{
// Copy the input point's coordinates into pt
const uint8_t* data_in = reinterpret_cast<const uint8_t*> (&input[i]);
uint8_t* data_out = reinterpret_cast<uint8_t*> (&output[i]);
memcpy (&pt[0], data_in + x_idx_offset_, sizeof (float));
memcpy (&pt[1], data_in + y_idx_offset_, sizeof (float));
memcpy (&pt[2], data_in + z_idx_offset_, sizeof (float));
if (!pcl_isfinite (pt[0]) || !pcl_isfinite (pt[1]) || !pcl_isfinite (pt[2]))
continue;
// This is the transform itself, applied in homogeneous coordinates
pt_t = tr * pt;
// Write pt_t back into data_out
memcpy (data_out + x_idx_offset_, &pt_t[0], sizeof (float));
memcpy (data_out + y_idx_offset_, &pt_t[1], sizeof (float));
memcpy (data_out + z_idx_offset_, &pt_t[2], sizeof (float));
memcpy (&nt[0], data_in + nx_idx_offset_, sizeof (float));
memcpy (&nt[1], data_in + ny_idx_offset_, sizeof (float));
memcpy (&nt[2], data_in + nz_idx_offset_, sizeof (float));
if (!pcl_isfinite (nt[0]) || !pcl_isfinite (nt[1]) || !pcl_isfinite (nt[2]))
continue;
// Normals are rotated only (a non-homogeneous, 3x3 transform)
nt_t = rot * nt;
// Write the transformed normal nt_t back into data_out
memcpy (data_out + nx_idx_offset_, &nt_t[0], sizeof (float));
memcpy (data_out + ny_idx_offset_, &nt_t[1], sizeof (float));
memcpy (data_out + nz_idx_offset_, &nt_t[2], sizeof (float));
}
}
else
{
for (size_t i = 0; i < input.size (); ++i)
{
const uint8_t* data_in = reinterpret_cast<const uint8_t*> (&input[i]);
uint8_t* data_out = reinterpret_cast<uint8_t*> (&output[i]);
memcpy (&pt[0], data_in + x_idx_offset_, sizeof (float));
memcpy (&pt[1], data_in + y_idx_offset_, sizeof (float));
memcpy (&pt[2], data_in + z_idx_offset_, sizeof (float));
if (!pcl_isfinite (pt[0]) || !pcl_isfinite (pt[1]) || !pcl_isfinite (pt[2]))
continue;
// Homogeneous transform of the point
pt_t = tr * pt;
memcpy (data_out + x_idx_offset_, &pt_t[0], sizeof (float));
memcpy (data_out + y_idx_offset_, &pt_t[1], sizeof (float));
memcpy (data_out + z_idx_offset_, &pt_t[2], sizeof (float));
}
}
}
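Outside of ICP's internals, the same kind of homogeneous transform can be applied to a whole cloud with PCL's ready-made helper. A minimal sketch, reusing the cloud_in pointer and the 0.7 x-shift from the demo program above:

#include <pcl/common/transforms.h>

Eigen::Matrix4f transform = Eigen::Matrix4f::Identity ();
transform (0, 3) = 0.7f;   // translate by 0.7 along x
pcl::PointCloud<pcl::PointXYZ>::Ptr transformed (new pcl::PointCloud<pcl::PointXYZ>);
pcl::transformPointCloud (*cloud_in, *transformed, transform);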
- The determineCorrespondences() function
///
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::CorrespondenceEstimation<PointSource, PointTarget, Scalar>::determineCorrespondences (
pcl::Correspondences &correspondences, double max_distance)
{
if (!initCompute ())
return;
double max_dist_sqr = max_distance * max_distance;
correspondences.resize (indices_->size ());
std::vector<int> index (1);
std::vector<float> distance (1);
pcl::Correspondence corr;
unsigned int nr_valid_correspondences = 0;
// Check if the template types are the same. If true, avoid a copy.
// Both point types MUST be registered using the POINT_CLOUD_REGISTER_POINT_STRUCT macro!
if (isSamePointType<PointSource, PointTarget> ())
{
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
tree_->nearestKSearch (input_->points[*idx], 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
corr.index_query = *idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
else
{
PointTarget pt;
// Iterate over the input set of source indices
for (std::vector<int>::const_iterator idx = indices_->begin (); idx != indices_->end (); ++idx)
{
// Copy the source data to a target PointTarget format so we can search in the tree
copyPoint (input_->points[*idx], pt);
tree_->nearestKSearch (pt, 1, index, distance);
if (distance[0] > max_dist_sqr)
continue;
corr.index_query = *idx;
corr.index_match = index[0];
corr.distance = distance[0];
correspondences[nr_valid_correspondences++] = corr;
}
}
correspondences.resize (nr_valid_correspondences);
deinitCompute ();
}
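The nearestKSearch() call above is the heart of the correspondence search: for every source point, the nearest target point is looked up in a k-d tree, and the returned distances are squared (hence the comparison against max_dist_sqr). A standalone sketch of the same kind of query with pcl::KdTreeFLANN, using the clouds from the demo program (my own example, not the internal tree_ member):

#include <pcl/kdtree/kdtree_flann.h>

pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;
kdtree.setInputCloud (cloud_out);        // target cloud
std::vector<int> index (1);
std::vector<float> sqr_distance (1);     // squared distances
pcl::PointXYZ query = cloud_in->points[0];
if (kdtree.nearestKSearch (query, 1, index, sqr_distance) > 0)
  std::cout << "closest target point: " << index[0]
            << ", squared distance: " << sqr_distance[0] << std::endl;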
- estimateRigidTransformation()
#ifndef PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_HPP_
#define PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_HPP_
#include <pcl/common/eigen.h>
///
template <typename PointSource, typename PointTarget, typename Scalar> inline void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
const pcl::PointCloud<PointSource> &cloud_src,
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
size_t nr_points = cloud_src.points.size ();
if (cloud_tgt.points.size () != nr_points)
{
PCL_ERROR ("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", nr_points, cloud_tgt.points.size ());
return;
}
ConstCloudIterator<PointSource> source_it (cloud_src);
ConstCloudIterator<PointTarget> target_it (cloud_tgt);
estimateRigidTransformation (source_it, target_it, transformation_matrix);
}
///
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
const pcl::PointCloud<PointSource> &cloud_src,
const std::vector<int> &indices_src,
const pcl::PointCloud<PointTarget> &cloud_tgt,
Matrix4 &transformation_matrix) const
{
if (indices_src.size () != cloud_tgt.points.size ())
{
PCL_ERROR ("[pcl::TransformationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), cloud_tgt.points.size ());
return;
}
ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
ConstCloudIterator<PointTarget> target_it (cloud_tgt);
estimateRigidTransformation (source_it, target_it, transformation_matrix);
}
///
template <typename PointSource, typename PointTarget, typename Scalar> inline void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
const pcl::PointCloud<PointSource> &cloud_src,
const std::vector<int> &indices_src,
const pcl::PointCloud<PointTarget> &cloud_tgt,
const std::vector<int> &indices_tgt,
Matrix4 &transformation_matrix) const
{
if (indices_src.size () != indices_tgt.size ())
{
PCL_ERROR ("[pcl::TransformationEstimationSVD::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
return;
}
ConstCloudIterator<PointSource> source_it (cloud_src, indices_src);
ConstCloudIterator<PointTarget> target_it (cloud_tgt, indices_tgt);
estimateRigidTransformation (source_it, target_it, transformation_matrix);
}
///
// This four-argument overload is called first, but it delegates to yet another overload, and that one is the key piece
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
const pcl::PointCloud<PointSource> &cloud_src,
const pcl::PointCloud<PointTarget> &cloud_tgt,
const pcl::Correspondences &correspondences,
Matrix4 &transformation_matrix) const
{
ConstCloudIterator<PointSource> source_it (cloud_src, correspondences, true);
ConstCloudIterator<PointTarget> target_it (cloud_tgt, correspondences, false);
estimateRigidTransformation (source_it, target_it, transformation_matrix);
}
///
template <typename PointSource, typename PointTarget, typename Scalar> inline void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::estimateRigidTransformation (
ConstCloudIterator<PointSource>& source_it,
ConstCloudIterator<PointTarget>& target_it,
Matrix4 &transformation_matrix) const
{
// Convert to Eigen format
const int npts = static_cast <int> (source_it.size ());
// Two solution methods are provided below:
// 1. Solve directly with Eigen's umeyama()
// 2. Plain SVD: compute the centroids of the two clouds, demean them, and recover the transform from the correlation matrix H
if (use_umeyama_)
{
Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_src (3, npts);
Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_tgt (3, npts);
for (int i = 0; i < npts; ++i)
{
cloud_src (0, i) = source_it->x;
cloud_src (1, i) = source_it->y;
cloud_src (2, i) = source_it->z;
++source_it;
cloud_tgt (0, i) = target_it->x;
cloud_tgt (1, i) = target_it->y;
cloud_tgt (2, i) = target_it->z;
++target_it;
}
// Call Umeyama directly from Eigen (PCL patched version until Eigen is released)
// The call below performs the SVD-based solution; internally it is implemented with Eigen3
// A single call solves for the transformation matrix directly via SVD
transformation_matrix = pcl::umeyama (cloud_src, cloud_tgt, false);
}
else
{
source_it.reset (); target_it.reset ();
// <cloud_src,cloud_src> is the source dataset
transformation_matrix.setIdentity ();
Eigen::Matrix<Scalar, 4, 1> centroid_src, centroid_tgt;
// Estimate the centroids of source, target
compute3DCentroid (source_it, centroid_src);
compute3DCentroid (target_it, centroid_tgt);
source_it.reset (); target_it.reset ();
// Subtract the centroids from source, target
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean, cloud_tgt_demean;
demeanPointCloud (source_it, centroid_src, cloud_src_demean);
demeanPointCloud (target_it, centroid_tgt, cloud_tgt_demean);
getTransformationFromCorrelation (cloud_src_demean, centroid_src, cloud_tgt_demean, centroid_tgt, transformation_matrix);
}
}
///
template <typename PointSource, typename PointTarget, typename Scalar> void
pcl::registration::TransformationEstimationSVD<PointSource, PointTarget, Scalar>::getTransformationFromCorrelation (
const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_src_demean,
const Eigen::Matrix<Scalar, 4, 1> &centroid_src,
const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> &cloud_tgt_demean,
const Eigen::Matrix<Scalar, 4, 1> &centroid_tgt,
Matrix4 &transformation_matrix) const
{
transformation_matrix.setIdentity ();
// Assemble the correlation matrix H = source * target'
Eigen::Matrix<Scalar, 3, 3> H = (cloud_src_demean * cloud_tgt_demean.transpose ()).topLeftCorner (3, 3);
// Compute the Singular Value Decomposition
Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3> > svd (H, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU ();
Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV ();
// Compute R = V * U'
if (u.determinant () * v.determinant () < 0)
{
for (int x = 0; x < 3; ++x)
v (x, 2) *= -1;
}
Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose ();
// Return the correct transformation
transformation_matrix.topLeftCorner (3, 3) = R;
const Eigen::Matrix<Scalar, 3, 1> Rc (R * centroid_src.head (3));
transformation_matrix.block (0, 3, 3, 1) = centroid_tgt.head (3) - Rc;
}
//#define PCL_INSTANTIATE_TransformationEstimationSVD(T,U) template class PCL_EXPORTS pcl::registration::TransformationEstimationSVD<T,U>;
#endif /* PCL_REGISTRATION_TRANSFORMATION_ESTIMATION_SVD_HPP_ */
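The use_umeyama_ branch above boils down to one call. A minimal standalone sketch with made-up 3xN matrices (illustration only; PCL calls its own patched pcl::umeyama, while stock Eigen exposes Eigen::umeyama with the same interface):

#include <Eigen/Geometry>   // provides Eigen::umeyama

Eigen::Matrix<float, 3, Eigen::Dynamic> src (3, 4), tgt (3, 4);
src << 0, 1, 0, 1,
       0, 0, 1, 1,
       0, 0, 0, 1;
tgt = src;
tgt.row (0).array () += 0.7f;   // same x-shift as in the demo program
// false = rigid transform only, no scale estimation
Eigen::Matrix4f T = Eigen::umeyama (src, tgt, false);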
These comments were written at the time, based purely on my own understanding, so please forgive me if anything is wrong!