VINS Loop Closure Detection and Global Optimization
Loop Closure Detection
In VINS, both loop closure detection and global optimization are handled in pose_graph.cpp. First, pose_graph_node loads the vocabulary file for the BriefDatabase. If a saved map is to be loaded, loadPoseGraph is called; it reads a series of files and restores all the stored keyframes. Meanwhile, once a set of callbacks has gathered the data needed to build a new keyframe, a KeyFrame is constructed; inside it, additional feature points are extracted and BRIEF descriptors are computed, and then pose_graph calls addKeyFrame. Both loadKeyFrame and addKeyFrame take the flag_detect_loop flag: it is 0 when the keyframe is loaded from disk and 1 when it is newly created, as sketched below. A newly created keyframe calls detectLoop, which returns the matched historical keyframe with the smallest index.
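A minimal sketch of this flow, condensed from VINS-Mono's pose_graph (member names such as voc, db, and addKeyFrameIntoVoc follow that code base; everything not related to the flag has been trimmed):

void PoseGraph::loadVocabulary(std::string voc_path)
{
    // load the BRIEF vocabulary once and hand it to the DBoW2 database
    voc = new BriefVocabulary(voc_path);
    db.setVocabulary(*voc, false, 0);
}

void PoseGraph::addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop)
{
    int loop_index = -1;
    if (flag_detect_loop)
        loop_index = detectLoop(cur_kf, cur_kf->index);  // newly built keyframe: query the database, then add
    else
        addKeyFrameIntoVoc(cur_kf);                      // keyframe loaded from disk: add only
    // ... loop handling and publishing, shown further below ...
}

detectLoop itself is listed next.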
// To keep the focus on the core logic, the DEBUG_IMAGE parts have been removed
int PoseGraph::detectLoop(KeyFrame* keyframe, int frame_index)
{
    TicToc tmp_t;
    //first query; then add this frame into database!
    QueryResults ret;
    TicToc t_query;
    // arguments: the query descriptors, the result set, the maximum number of results,
    // and an upper bound on the result frame id (only frames at least 50 frames older are considered)
    db.query(keyframe->brief_descriptors, ret, 4, frame_index - 50);
    //printf("query time: %f", t_query.toc());
    //cout << "Searching for Image " << frame_index << ". " << ret << endl;
    TicToc t_add;
    db.add(keyframe->brief_descriptors);
    //printf("add feature time: %f", t_add.toc());
    // ret[0] is the nearest neighbour's score; the threshold changes with the neighbour score
    bool find_loop = false;
    cv::Mat loop_result;
    // a good match with its neighbour
    if (ret.size() >= 1 && ret[0].Score > 0.05)
        for (unsigned int i = 1; i < ret.size(); i++)
        {
            if (ret[i].Score > 0.015)
            {
                find_loop = true;
                int tmp_index = ret[i].Id;  // only used by the removed DEBUG_IMAGE code
            }
        }
    // pick the matched frame with the smallest frame id
    if (find_loop && frame_index > 50)
    {
        int min_index = -1;
        for (unsigned int i = 0; i < ret.size(); i++)
        {
            if (min_index == -1 || (ret[i].Id < min_index && ret[i].Score > 0.015))
                min_index = ret[i].Id;
        }
        return min_index;
    }
    else
        return -1;
}
After a matching keyframe is obtained, the relative pose between the two frames is computed and the current frame's index is pushed into the global-optimization queue:
if (loop_index != -1)
{
    //printf(" %d detect loop with %d \n", cur_kf->index, loop_index);
    KeyFrame* old_kf = getKeyFrame(loop_index);
    // findConnection computes the relative pose between the two keyframes, mainly via
    // PnPRANSAC(matched_2d_old_norm, matched_3d, status, PnP_T_old, PnP_R_old);
    // it is also responsible for sending the matched points back to the estimator node
    if (cur_kf->findConnection(old_kf))
    {
        if (earliest_loop_index > loop_index || earliest_loop_index == -1)
            earliest_loop_index = loop_index;
        Vector3d w_P_old, w_P_cur, vio_P_cur;
        Matrix3d w_R_old, w_R_cur, vio_R_cur;
        old_kf->getVioPose(w_P_old, w_R_old);
        cur_kf->getVioPose(vio_P_cur, vio_R_cur);
        Vector3d relative_t;
        Quaterniond relative_q;
        relative_t = cur_kf->getLoopRelativeT();
        relative_q = (cur_kf->getLoopRelativeQ()).toRotationMatrix();
        w_P_cur = w_R_old * relative_t + w_P_old;
        w_R_cur = w_R_old * relative_q;
        double shift_yaw;
        Matrix3d shift_r;
        Vector3d shift_t;
        shift_yaw = Utility::R2ypr(w_R_cur).x() - Utility::R2ypr(vio_R_cur).x();
        // from the old frame and the relative pose we obtain the current frame's pose in the world
        // frame, and therefore its difference from the current frame's known VIO pose;
        // shift_r and shift_t encode that difference and are used to update the other frames' poses
        shift_r = Utility::ypr2R(Vector3d(shift_yaw, 0, 0));
        shift_t = w_P_cur - w_R_cur * vio_R_cur.transpose() * vio_P_cur;
        // shift vio pose of whole sequence to the world frame
        if (old_kf->sequence != cur_kf->sequence && sequence_loop[cur_kf->sequence] == 0)
        {
            w_r_vio = shift_r;
            w_t_vio = shift_t;
            vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
            vio_R_cur = w_r_vio * vio_R_cur;
            cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
            list<KeyFrame*>::iterator it = keyframelist.begin();
            for (; it != keyframelist.end(); it++)
            {
                if ((*it)->sequence == cur_kf->sequence)
                {
                    Vector3d vio_P_cur;
                    Matrix3d vio_R_cur;
                    (*it)->getVioPose(vio_P_cur, vio_R_cur);
                    vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
                    vio_R_cur = w_r_vio * vio_R_cur;
                    (*it)->updateVioPose(vio_P_cur, vio_R_cur);
                }
            }
            sequence_loop[cur_kf->sequence] = 1;
        }
        m_optimize_buf.lock();
        optimize_buf.push(cur_kf->index);
        m_optimize_buf.unlock();
    }
}
m_keyframelist.lock();
Vector3d P;
Matrix3d R;
cur_kf->getVioPose(P, R);
P = r_drift * P + t_drift;
R = r_drift * R;
cur_kf->updatePose(P, R);
Quaterniond Q{R};
geometry_msgs::PoseStamped pose_stamped;
pose_stamped.header.stamp = ros::Time(cur_kf->time_stamp);
pose_stamped.header.frame_id = "world";
pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X;
pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y;
pose_stamped.pose.position.z = P.z();
pose_stamped.pose.orientation.x = Q.x();
pose_stamped.pose.orientation.y = Q.y();
pose_stamped.pose.orientation.z = Q.z();
pose_stamped.pose.orientation.w = Q.w();
path[sequence_cnt].poses.push_back(pose_stamped);
path[sequence_cnt].header = pose_stamped.header;
// publish the path topic for visualization
keyframelist.push_back(cur_kf);
publish();
m_keyframelist.unlock();
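Written out, the correction applied above models the accumulated error as a 4-DoF drift, i.e. a yaw rotation plus a translation. With r_drift = R_z(Δψ) and t_drift estimated by the optimization thread (see optimize4DoF below), every keyframe that has not yet been optimized is corrected as

\[
P_{world} = r_{drift}\, P_{vio} + t_{drift}, \qquad R_{world} = r_{drift}\, R_{vio},
\]

which is exactly the update P = r_drift * P + t_drift, R = r_drift * R performed here and again at the end of optimize4DoF.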
Global Optimization
The VINS paper points out that roll and pitch are always observable thanks to the IMU, so only x, y, z, and yaw — the quantities that accumulate drift — need to be optimized. Before looking at the optimization itself, note how keyframe data reaches pose_graph: in visualization.cpp of vins_estimator we see:
void pubKeyframe(const Estimator &estimator)
{
    // pub camera pose, 2D-3D points of keyframe
    // a keyframe is sent to pose_graph only when an old frame is being marginalized (marginalization_flag == MARGIN_OLD);
    // pose_graph also subscribes to the vio topic, but only for display — the KeyFrame is actually built from the data published here
    if (estimator.solver_flag == Estimator::SolverFlag::NON_LINEAR && estimator.marginalization_flag == 0)
    {
        int i = WINDOW_SIZE - 2;
        //Vector3d P = estimator.Ps[i] + estimator.Rs[i] * estimator.tic[0];
        Vector3d P = estimator.Ps[i];
        Quaterniond R = Quaterniond(estimator.Rs[i]);
        nav_msgs::Odometry odometry;
        odometry.header = estimator.Headers[WINDOW_SIZE - 2];
        odometry.header.frame_id = "world";
        odometry.pose.pose.position.x = P.x();
        odometry.pose.pose.position.y = P.y();
        odometry.pose.pose.position.z = P.z();
        odometry.pose.pose.orientation.x = R.x();
        odometry.pose.pose.orientation.y = R.y();
        odometry.pose.pose.orientation.z = R.z();
        odometry.pose.pose.orientation.w = R.w();
        //printf("time: %f t: %f %f %f r: %f %f %f %f\n", odometry.header.stamp.toSec(), P.x(), P.y(), P.z(), R.w(), R.x(), R.y(), R.z());
        pub_keyframe_pose.publish(odometry);
        // the marginalized frame's feature points are also sent to pose_graph
        sensor_msgs::PointCloud point_cloud;
        point_cloud.header = estimator.Headers[WINDOW_SIZE - 2];
        for (auto &it_per_id : estimator.f_manager.feature)
        {
            int frame_size = it_per_id.feature_per_frame.size();
            if (it_per_id.start_frame < WINDOW_SIZE - 2 && it_per_id.start_frame + frame_size - 1 >= WINDOW_SIZE - 2 && it_per_id.solve_flag == 1)
            {
                int imu_i = it_per_id.start_frame;
                Vector3d pts_i = it_per_id.feature_per_frame[0].point * it_per_id.estimated_depth;
                Vector3d w_pts_i = estimator.Rs[imu_i] * (estimator.ric[0] * pts_i + estimator.tic[0])
                                   + estimator.Ps[imu_i];
                geometry_msgs::Point32 p;
                p.x = w_pts_i(0);
                p.y = w_pts_i(1);
                p.z = w_pts_i(2);
                point_cloud.points.push_back(p);
                int imu_j = WINDOW_SIZE - 2 - it_per_id.start_frame;
                sensor_msgs::ChannelFloat32 p_2d;
                p_2d.values.push_back(it_per_id.feature_per_frame[imu_j].point.x());
                p_2d.values.push_back(it_per_id.feature_per_frame[imu_j].point.y());
                p_2d.values.push_back(it_per_id.feature_per_frame[imu_j].uv.x());
                p_2d.values.push_back(it_per_id.feature_per_frame[imu_j].uv.y());
                p_2d.values.push_back(it_per_id.feature_id);
                point_cloud.channels.push_back(p_2d);
            }
        }
        pub_keyframe_point.publish(point_cloud);
    }
}
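On the receiving side, pose_graph_node unpacks these two messages in its processing thread before constructing a KeyFrame. A sketch of that unpacking, with the channel layout matching the publisher above (variable names are illustrative; the actual callback in pose_graph_node.cpp differs only in detail):

vector<cv::Point3f> point_3d;
vector<cv::Point2f> point_2d_uv, point_2d_norm;
vector<double> point_id;
for (unsigned int i = 0; i < point_msg->points.size(); i++)
{
    // world-frame 3D position of the feature
    point_3d.push_back(cv::Point3f(point_msg->points[i].x,
                                   point_msg->points[i].y,
                                   point_msg->points[i].z));
    // one channel per point: [x_norm, y_norm, u, v, feature_id]
    const std::vector<float>& c = point_msg->channels[i].values;
    point_2d_norm.push_back(cv::Point2f(c[0], c[1]));
    point_2d_uv.push_back(cv::Point2f(c[2], c[3]));
    point_id.push_back(c[4]);
}

These vectors, together with the pose from the keyframe_pose topic, go into the KeyFrame constructor, which then extracts the extra feature points and BRIEF descriptors used by detectLoop above.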
The global optimization function:
void PoseGraph::optimize4DoF()
{
    while (true)
    {
        int cur_index = -1;
        int first_looped_index = -1;
        m_optimize_buf.lock();
        while (!optimize_buf.empty())
        {
            cur_index = optimize_buf.front();
            first_looped_index = earliest_loop_index;
            optimize_buf.pop();
        }
        m_optimize_buf.unlock();
        if (cur_index != -1)
        {
            printf("optimize pose graph \n");
            TicToc tmp_t;
            m_keyframelist.lock();
            KeyFrame* cur_kf = getKeyFrame(cur_index);
            int max_length = cur_index + 1;
            // w^t_i w^q_i
            double t_array[max_length][3];
            Quaterniond q_array[max_length];
            double euler_array[max_length][3];
            double sequence_array[max_length];
            ceres::Problem problem;
            ceres::Solver::Options options;
            options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
            options.max_num_iterations = 5;
            ceres::Solver::Summary summary;
            ceres::LossFunction *loss_function;
            loss_function = new ceres::HuberLoss(1.0);
            ceres::LocalParameterization* angle_local_parameterization =
                AngleLocalParameterization::Create();
            list<KeyFrame*>::iterator it;
            int i = 0;
            for (it = keyframelist.begin(); it != keyframelist.end(); it++)
            {
                // skip every keyframe older than the earliest loop-closure frame
                if ((*it)->index < first_looped_index)
                    continue;
                (*it)->local_index = i;
                Quaterniond tmp_q;
                Matrix3d tmp_r;
                Vector3d tmp_t;
                (*it)->getVioPose(tmp_t, tmp_r);
                tmp_q = tmp_r;
                t_array[i][0] = tmp_t(0);
                t_array[i][1] = tmp_t(1);
                t_array[i][2] = tmp_t(2);
                q_array[i] = tmp_q;
                Vector3d euler_angle = Utility::R2ypr(tmp_q.toRotationMatrix());
                euler_array[i][0] = euler_angle.x();
                euler_array[i][1] = euler_angle.y();
                euler_array[i][2] = euler_angle.z();
                sequence_array[i] = (*it)->sequence;
                problem.AddParameterBlock(euler_array[i], 1, angle_local_parameterization);
                problem.AddParameterBlock(t_array[i], 3);
                // fix the parameters of the earliest loop-closure frame (and of sequence 0, the loaded map)
                if ((*it)->index == first_looped_index || (*it)->sequence == 0)
                {
                    problem.SetParameterBlockConstant(euler_array[i]);
                    problem.SetParameterBlockConstant(t_array[i]);
                }
                //add edge
                // for each i, add position and yaw residuals only against its previous four keyframes
                for (int j = 1; j < 5; j++)
                {
                    if (i - j >= 0 && sequence_array[i] == sequence_array[i-j])
                    {
                        Vector3d euler_conncected = Utility::R2ypr(q_array[i-j].toRotationMatrix());
                        Vector3d relative_t(t_array[i][0] - t_array[i-j][0], t_array[i][1] - t_array[i-j][1], t_array[i][2] - t_array[i-j][2]);
                        relative_t = q_array[i-j].inverse() * relative_t;
                        double relative_yaw = euler_array[i][0] - euler_array[i-j][0];
                        ceres::CostFunction* cost_function = FourDOFError::Create(relative_t.x(), relative_t.y(), relative_t.z(),
                                                                                  relative_yaw, euler_conncected.y(), euler_conncected.z());
                        problem.AddResidualBlock(cost_function, NULL, euler_array[i-j],
                                                 t_array[i-j],
                                                 euler_array[i],
                                                 t_array[i]);
                    }
                }
                //add loop edge
                // if this keyframe has a detected loop closure
                if ((*it)->has_loop)
                {
                    // the matched loop frame must not be older than the earliest loop-closure frame
                    assert((*it)->loop_index >= first_looped_index);
                    int connected_index = getKeyFrame((*it)->loop_index)->local_index;
                    Vector3d euler_conncected = Utility::R2ypr(q_array[connected_index].toRotationMatrix());
                    Vector3d relative_t;
                    relative_t = (*it)->getLoopRelativeT();
                    double relative_yaw = (*it)->getLoopRelativeYaw();
                    ceres::CostFunction* cost_function = FourDOFWeightError::Create(relative_t.x(), relative_t.y(), relative_t.z(),
                                                                                    relative_yaw, euler_conncected.y(), euler_conncected.z());
                    problem.AddResidualBlock(cost_function, loss_function, euler_array[connected_index],
                                             t_array[connected_index],
                                             euler_array[i],
                                             t_array[i]);
                }
                if ((*it)->index == cur_index)
                    break;
                i++;
            }
            m_keyframelist.unlock();
            ceres::Solve(options, &problem, &summary);
            m_keyframelist.lock();
            i = 0;
            // write the optimized parameters back into the keyframes that took part in the optimization
            for (it = keyframelist.begin(); it != keyframelist.end(); it++)
            {
                if ((*it)->index < first_looped_index)
                    continue;
                Quaterniond tmp_q;
                tmp_q = Utility::ypr2R(Vector3d(euler_array[i][0], euler_array[i][1], euler_array[i][2]));
                Vector3d tmp_t = Vector3d(t_array[i][0], t_array[i][1], t_array[i][2]);
                Matrix3d tmp_r = tmp_q.toRotationMatrix();
                (*it)->updatePose(tmp_t, tmp_r);
                if ((*it)->index == cur_index)
                    break;
                i++;
            }
            // compute the drift of the current frame, then use it to correct the keyframes added after cur_index
            Vector3d cur_t, vio_t;
            Matrix3d cur_r, vio_r;
            cur_kf->getPose(cur_t, cur_r);
            cur_kf->getVioPose(vio_t, vio_r);
            m_drift.lock();
            yaw_drift = Utility::R2ypr(cur_r).x() - Utility::R2ypr(vio_r).x();
            r_drift = Utility::ypr2R(Vector3d(yaw_drift, 0, 0));
            t_drift = cur_t - r_drift * vio_t;
            m_drift.unlock();
            it++;
            for (; it != keyframelist.end(); it++)
            {
                Vector3d P;
                Matrix3d R;
                (*it)->getVioPose(P, R);
                P = r_drift * P + t_drift;
                R = r_drift * R;
                (*it)->updatePose(P, R);
            }
            m_keyframelist.unlock();
            updatePath();
        }
        std::chrono::milliseconds dura(2000);
        std::this_thread::sleep_for(dura);
    }
}
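For reference, the residual that FourDOFError implements (and that FourDOFWeightError reuses with an extra weight for the loop edges) can be written as a 4-DoF relative-pose error in which the pitch θ_i and roll φ_i of keyframe i are taken from VIO and held constant, so only the positions p and yaw angles ψ are free variables:

\[
r_{ij} =
\begin{bmatrix}
R(\psi_i, \theta_i, \phi_i)^{\top} (p_j - p_i) - \hat{p}_{ij} \\
(\psi_j - \psi_i) - \hat{\psi}_{ij}
\end{bmatrix},
\]

where \hat{p}_{ij} and \hat{\psi}_{ij} are the relative translation and yaw computed either from consecutive VIO poses (the sequential edges above) or from the loop constraint (the loop edges). Since each keyframe contributes only one yaw angle and three translation components as variables, this is exactly the 4-DoF pose graph optimization described in the VINS paper.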