Besides the visual projection residuals and the loop-closure residuals, VINS' optimization also adds the IMU residuals and the prior residual produced by marginalization. Some of these are not easy to grasp; the related blog post and Zhihu answer are worth consulting for additional background.
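Putting these pieces together, the objective minimized over the sliding-window states $\mathcal{X}$ is, roughly in the notation of the VINS-Mono paper (a summary for orientation, not something produced by the code below):

$$
\min_{\mathcal{X}}\;\Big\{\;
\underbrace{\big\|\mathbf{r}_p - \mathbf{H}_p\,\mathcal{X}\big\|^2}_{\text{marginalization prior}}
\;+\;\sum_{k\in\mathcal{B}}
\underbrace{\big\|\mathbf{r}_{\mathcal{B}}\big(\hat{\mathbf{z}}^{b_k}_{b_{k+1}},\mathcal{X}\big)\big\|^2_{\mathbf{P}^{b_k}_{b_{k+1}}}}_{\text{IMU pre-integration}}
\;+\;\sum_{(l,j)\in\mathcal{C}}
\rho\Big(\underbrace{\big\|\mathbf{r}_{\mathcal{C}}\big(\hat{\mathbf{z}}^{c_j}_{l},\mathcal{X}\big)\big\|^2_{\mathbf{P}^{c_j}_{l}}}_{\text{visual reprojection}}\Big)
\;\Big\}
$$

where $\rho(\cdot)$ is the robust loss (Cauchy in the code below) and the loop-closure term takes the same reprojection form, only evaluated against the relocalized pose relo_Pose.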

void Estimator::optimization()
{
ceres::Problem problem;
ceres::LossFunction *loss_function;
//loss_function = new ceres::HuberLoss(1.0);
loss_function = new ceres::CauchyLoss(1.0);
// Add the Ceres parameter blocks: poses, velocities, IMU biases, camera-IMU extrinsics, and the time offset.
for (int i = 0; i < WINDOW_SIZE + 1; i++)
{
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(para_Pose[i], SIZE_POSE, local_parameterization);
problem.AddParameterBlock(para_SpeedBias[i], SIZE_SPEEDBIAS);
}
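// Note (added for clarity): PoseLocalParameterization treats each 7-D pose block
// (3-D position + unit quaternion) as a 6-DoF quantity, so Ceres updates stay on the
// quaternion manifold instead of perturbing the four quaternion components independently.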
for (int i = 0; i < NUM_OF_CAM; i++)
{
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(para_Ex_Pose[i], SIZE_POSE, local_parameterization);
if (!ESTIMATE_EXTRINSIC)
{
ROS_DEBUG("fix extinsic param");
problem.SetParameterBlockConstant(para_Ex_Pose[i]);
}
else
ROS_DEBUG("estimate extinsic param");
}
if (ESTIMATE_TD)
{
problem.AddParameterBlock(para_Td[0], 1);
//problem.SetParameterBlockConstant(para_Td[0]);
}
TicToc t_whole, t_prepare;
// Convert data types: Ps, Rs into para_Pose; Vs, Bas, Bgs into para_SpeedBias; tic, ric into para_Ex_Pose.
vector2double();
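// A rough sketch of the packing done by vector2double() (layout only, not the actual code):
//   para_Pose[i]      = [px, py, pz, qx, qy, qz, qw]                  -> SIZE_POSE = 7
//   para_SpeedBias[i] = [vx, vy, vz, bax, bay, baz, bgx, bgy, bgz]    -> SIZE_SPEEDBIAS = 9
//   para_Feature[k]   = [inverse depth of feature k]
//   para_Ex_Pose[c]   = camera-IMU extrinsic, same 7-D layout as para_Pose
//   para_Td[0]        = camera-IMU time offset
// double2vector() performs the inverse mapping after the solve.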
// Add the marginalization prior residual; its internals are examined in more detail later.
if (last_marginalization_info)
{
// construct new marginlization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
problem.AddResidualBlock(marginalization_factor, NULL,
last_marginalization_parameter_blocks);
}
// Add the IMU pre-integration residuals.
for (int i = 0; i < WINDOW_SIZE; i++)
{
int j = i + 1;
if (pre_integrations[j]->sum_dt > 10.0)
continue;
IMUFactor* imu_factor = new IMUFactor(pre_integrations[j]);
problem.AddResidualBlock(imu_factor, NULL, para_Pose[i], para_SpeedBias[i], para_Pose[j], para_SpeedBias[j]);
}
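// Note (added for clarity): each IMUFactor wraps the pre-integrated measurement between two
// consecutive frames; its residual is 15-D (position, orientation, velocity, accelerometer bias,
// gyroscope bias) and couples the two pose blocks and the two speed/bias blocks passed above.
// Intervals with sum_dt > 10 s are skipped, since such a long pre-integration is too uncertain
// to constrain the states meaningfully.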
int f_m_cnt = 0;
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
for (auto &it_per_frame : it_per_id.feature_per_frame)
{
imu_j++;
if (imu_i == imu_j)
{
continue;
}
Vector3d pts_j = it_per_frame.point;
// Add the appropriate projection residual depending on whether the time offset TD is being estimated.
if (ESTIMATE_TD)
{
ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity,
it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td,
it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());
problem.AddResidualBlock(f_td, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]);
/*
double **para = new double *[5];
para[0] = para_Pose[imu_i];
para[1] = para_Pose[imu_j];
para[2] = para_Ex_Pose[0];
para[3] = para_Feature[feature_index];
para[4] = para_Td[0];
f_td->check(para);
*/
}
else
{
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
problem.AddResidualBlock(f, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]);
}
f_m_cnt++;
}
} ROS_DEBUG("visual measurement count: %d", f_m_cnt);
ROS_DEBUG("prepare for ceres: %f", t_prepare.toc());
// Add the loop-closure (relocalization) residuals.
if(relocalization_info)
{
//printf("set relocalization factor! \n");
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(relo_Pose, SIZE_POSE, local_parameterization);
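// Note (added for clarity): relo_Pose is the pose of the matched loop-closure keyframe
// (expressed in the current window's reference frame), added as an extra parameter block;
// match_points stores each matched feature as (x, y, feature_id), with x, y the normalized
// coordinates observed in that loop keyframe, which is why .z() is compared against feature_id below.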
int retrive_feature_index = 0;
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int start = it_per_id.start_frame;
if(start <= relo_frame_local_index)
{
while((int)match_points[retrive_feature_index].z() < it_per_id.feature_id)
{
retrive_feature_index++;
}
if((int)match_points[retrive_feature_index].z() == it_per_id.feature_id)
{
Vector3d pts_j = Vector3d(match_points[retrive_feature_index].x(), match_points[retrive_feature_index].y(), 1.0);
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
problem.AddResidualBlock(f, loss_function, para_Pose[start], relo_Pose, para_Ex_Pose[0], para_Feature[feature_index]);
retrive_feature_index++;
}
}
}
}
ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
//options.num_threads = 2;
options.trust_region_strategy_type = ceres::DOGLEG;
options.max_num_iterations = NUM_ITERATIONS;
//options.use_explicit_schur_complement = true;
//options.minimizer_progress_to_stdout = true;
//options.use_nonmonotonic_steps = true;
if (marginalization_flag == MARGIN_OLD)
options.max_solver_time_in_seconds = SOLVER_TIME * 4.0 / 5.0;
else
options.max_solver_time_in_seconds = SOLVER_TIME;
TicToc t_solver;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
//cout << summary.BriefReport() << endl;
ROS_DEBUG("Iterations : %d", static_cast<int>(summary.iterations.size()));
ROS_DEBUG("solver costs: %f", t_solver.toc()); double2vector();
// After optimization, perform marginalization. For background on marginalization, see [this blog post](https://blog.csdn.net/heyijia0327/article/details/53707261).
TicToc t_whole_marginalization;
if (marginalization_flag == MARGIN_OLD)
{
MarginalizationInfo *marginalization_info = new MarginalizationInfo();
vector2double();
if (last_marginalization_info)
{
vector<int> drop_set;
for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
{
if (last_marginalization_parameter_blocks[i] == para_Pose[0] ||
last_marginalization_parameter_blocks[i] == para_SpeedBias[0])
drop_set.push_back(i);
}
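// Note (added for clarity): drop_set lists, for this residual block, the indices of the
// parameter blocks that will be marginalized out -- here the oldest pose and its speed/bias.
// The same convention is used below, e.g. vector<int>{0, 3} for a projection factor drops
// para_Pose[0] and the feature's inverse depth.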
// construct new marginlization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL,
last_marginalization_parameter_blocks,
drop_set);
marginalization_info->addResidualBlockInfo(residual_block_info);
}
{
if (pre_integrations[1]->sum_dt < 10.0)
{
IMUFactor* imu_factor = new IMUFactor(pre_integrations[1]);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(imu_factor, NULL,
vector<double *>{para_Pose[0], para_SpeedBias[0], para_Pose[1], para_SpeedBias[1]},
vector<int>{0, 1});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
}
{
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
if (imu_i != 0)
continue;
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
for (auto &it_per_frame : it_per_id.feature_per_frame)
{
imu_j++;
if (imu_i == imu_j)
continue;
Vector3d pts_j = it_per_frame.point;
if (ESTIMATE_TD)
{
ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity,
it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td,
it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f_td, loss_function,
vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]},
vector<int>{0, 3});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
else
{
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f, loss_function,
vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]},
vector<int>{0, 3});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
}
}
}
TicToc t_pre_margin;
marginalization_info->preMarginalize();
ROS_DEBUG("pre marginalization %f ms", t_pre_margin.toc()); TicToc t_margin;
marginalization_info->marginalize();
ROS_DEBUG("marginalization %f ms", t_margin.toc()); std::unordered_map<long, double *> addr_shift;
for (int i = 1; i <= WINDOW_SIZE; i++)
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
}
for (int i = 0; i < NUM_OF_CAM; i++)
addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
if (ESTIMATE_TD)
{
addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
}
vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);
if (last_marginalization_info)
delete last_marginalization_info;
last_marginalization_info = marginalization_info;
last_marginalization_parameter_blocks = parameter_blocks;
}
else
{
// If the second-newest frame is not a keyframe, its visual measurements are discarded (marginalized)
// while its IMU measurements are kept in the sliding window. (The remaining steps mirror the previous case.)
if (last_marginalization_info &&
std::count(std::begin(last_marginalization_parameter_blocks), std::end(last_marginalization_parameter_blocks), para_Pose[WINDOW_SIZE - 1]))
{
MarginalizationInfo *marginalization_info = new MarginalizationInfo();
vector2double();
if (last_marginalization_info)
{
vector<int> drop_set;
for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
{
ROS_ASSERT(last_marginalization_parameter_blocks[i] != para_SpeedBias[WINDOW_SIZE - 1]);
if (last_marginalization_parameter_blocks[i] == para_Pose[WINDOW_SIZE - 1])
drop_set.push_back(i);
}
// construct new marginlization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL,
last_marginalization_parameter_blocks,
drop_set);
marginalization_info->addResidualBlockInfo(residual_block_info);
}
TicToc t_pre_margin;
ROS_DEBUG("begin marginalization");
marginalization_info->preMarginalize();
ROS_DEBUG("end pre marginalization, %f ms", t_pre_margin.toc()); TicToc t_margin;
ROS_DEBUG("begin marginalization");
marginalization_info->marginalize();
ROS_DEBUG("end marginalization, %f ms", t_margin.toc()); std::unordered_map<long, double *> addr_shift;
for (int i = 0; i <= WINDOW_SIZE; i++)
{
if (i == WINDOW_SIZE - 1)
continue;
else if (i == WINDOW_SIZE)
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
}
else
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i];
}
}
for (int i = 0; i < NUM_OF_CAM; i++)
addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
if (ESTIMATE_TD)
{
addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
}
vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);
if (last_marginalization_info)
delete last_marginalization_info;
last_marginalization_info = marginalization_info;
last_marginalization_parameter_blocks = parameter_blocks;
}
}
ROS_DEBUG("whole marginalization costs: %f", t_whole_marginalization.toc()); ROS_DEBUG("whole time for ceres: %f", t_whole.toc());
}
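For reference, the marginalization performed above reduces to the standard Schur complement on the linearized system (a textbook summary consistent with the blog post linked in the code, not something computed by this snippet). Stacking and linearizing all residuals that touch the states to be dropped gives $H\,\delta x = b$; partitioning into marginalized states $x_m$ and retained states $x_r$,

$$
\begin{bmatrix} H_{mm} & H_{mr} \\ H_{rm} & H_{rr} \end{bmatrix}
\begin{bmatrix} \delta x_m \\ \delta x_r \end{bmatrix}
=
\begin{bmatrix} b_m \\ b_r \end{bmatrix}
\quad\Longrightarrow\quad
\big(H_{rr} - H_{rm} H_{mm}^{-1} H_{mr}\big)\,\delta x_r = b_r - H_{rm} H_{mm}^{-1} b_m .
$$

The reduced system on the right is what MarginalizationInfo converts back into the linearized prior that enters the next optimization through last_marginalization_info.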

Sliding-window update

  1. If the oldest frame is marginalized, every state array (Ps and the others) is shifted down by one slot: element i is swapped with element i + 1, and the slot at index WINDOW_SIZE is then cleared for the next frame.
  2. If the second-newest frame is dropped, the newest frame's data simply replaces the second-newest slot, and the newest slot is then cleared (both patterns are sketched right after this list).
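A minimal toy sketch of the two shift patterns on a plain state array (illustration only — the helper names are made up, and the real Estimator code shown next additionally migrates headers, IMU buffers, and pre-integrations):

#include <array>
#include <utility>

constexpr int WINDOW_SIZE = 10;  // same window length as the estimator

// Case 1: drop the oldest frame -- rotate every slot one step toward the front,
// then reuse slot WINDOW_SIZE as the placeholder for the next incoming frame.
template <typename State>
void slideOutOldest(std::array<State, WINDOW_SIZE + 1> &states)
{
    for (int i = 0; i < WINDOW_SIZE; ++i)
        std::swap(states[i], states[i + 1]);
    states[WINDOW_SIZE] = states[WINDOW_SIZE - 1];  // seed the freed slot with the newest state
}

// Case 2: drop the second-newest frame -- overwrite it with the newest state,
// then reuse slot WINDOW_SIZE for the next incoming frame.
template <typename State>
void slideOutSecondNewest(std::array<State, WINDOW_SIZE + 1> &states)
{
    states[WINDOW_SIZE - 1] = states[WINDOW_SIZE];
}

The actual implementation, which also has to manage the pre-integration objects and measurement buffers, is: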
void Estimator::slideWindow()
{
TicToc t_margin;
if (marginalization_flag == MARGIN_OLD)
{
back_R0 = Rs[0];
back_P0 = Ps[0];
if (frame_count == WINDOW_SIZE)
{
for (int i = 0; i < WINDOW_SIZE; i++)
{
Rs[i].swap(Rs[i + 1]);
std::swap(pre_integrations[i], pre_integrations[i + 1]);
dt_buf[i].swap(dt_buf[i + 1]);
linear_acceleration_buf[i].swap(linear_acceleration_buf[i + 1]);
angular_velocity_buf[i].swap(angular_velocity_buf[i + 1]);
Headers[i] = Headers[i + 1];
Ps[i].swap(Ps[i + 1]);
Vs[i].swap(Vs[i + 1]);
Bas[i].swap(Bas[i + 1]);
Bgs[i].swap(Bgs[i + 1]);
}
Headers[WINDOW_SIZE] = Headers[WINDOW_SIZE - 1];
Ps[WINDOW_SIZE] = Ps[WINDOW_SIZE - 1];
Vs[WINDOW_SIZE] = Vs[WINDOW_SIZE - 1];
Rs[WINDOW_SIZE] = Rs[WINDOW_SIZE - 1];
Bas[WINDOW_SIZE] = Bas[WINDOW_SIZE - 1];
Bgs[WINDOW_SIZE] = Bgs[WINDOW_SIZE - 1];
delete pre_integrations[WINDOW_SIZE];
pre_integrations[WINDOW_SIZE] = new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE]};
dt_buf[WINDOW_SIZE].clear();
linear_acceleration_buf[WINDOW_SIZE].clear();
angular_velocity_buf[WINDOW_SIZE].clear();
if (true || solver_flag == INITIAL)
{
double t_0 = Headers[0].stamp.toSec();
map<double, ImageFrame>::iterator it_0;
it_0 = all_image_frame.find(t_0);
delete it_0->second.pre_integration;
all_image_frame.erase(all_image_frame.begin(), it_0);
}
slideWindowOld();
}
}
else
{
if (frame_count == WINDOW_SIZE)
{
for (unsigned int i = 0; i < dt_buf[frame_count].size(); i++)
{
double tmp_dt = dt_buf[frame_count][i];
Vector3d tmp_linear_acceleration = linear_acceleration_buf[frame_count][i];
Vector3d tmp_angular_velocity = angular_velocity_buf[frame_count][i];
pre_integrations[frame_count - 1]->push_back(tmp_dt, tmp_linear_acceleration, tmp_angular_velocity);
dt_buf[frame_count - 1].push_back(tmp_dt);
linear_acceleration_buf[frame_count - 1].push_back(tmp_linear_acceleration);
angular_velocity_buf[frame_count - 1].push_back(tmp_angular_velocity);
}
Headers[frame_count - 1] = Headers[frame_count];
Ps[frame_count - 1] = Ps[frame_count];
Vs[frame_count - 1] = Vs[frame_count];
Rs[frame_count - 1] = Rs[frame_count];
Bas[frame_count - 1] = Bas[frame_count];
Bgs[frame_count - 1] = Bgs[frame_count];
delete pre_integrations[WINDOW_SIZE];
pre_integrations[WINDOW_SIZE] = new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE]};
dt_buf[WINDOW_SIZE].clear();
linear_acceleration_buf[WINDOW_SIZE].clear();
angular_velocity_buf[WINDOW_SIZE].clear();
slideWindowNew();
}
}
}
