VINS Estimator: Optimization and Marginalization
Besides the visual reprojection residuals, the VINS optimization also includes loop-closure (relocalization) residuals, IMU pre-integration residuals, and the prior residual produced by marginalization. Some of this is not easy to follow; the referenced blog post and Zhihu answer are helpful.
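In outline, the cost minimized in optimization() has the usual sliding-window form (notation loosely follows the VINS-Mono paper; this is a summary, not the exact implementation):

$$
\min_{\mathcal{X}}\;\Big\{\;\big\|r_p - H_p\,\mathcal{X}\big\|^2
\;+\;\sum_{k\in\mathcal{B}}\big\|r_{\mathcal{B}}\big(\hat z^{b_k}_{b_{k+1}},\,\mathcal{X}\big)\big\|^2_{P^{b_k}_{b_{k+1}}}
\;+\;\sum_{(l,j)\in\mathcal{C}}\rho\Big(\big\|r_{\mathcal{C}}\big(\hat z^{c_j}_{l},\,\mathcal{X}\big)\big\|^2_{P^{c_j}_{l}}\Big)\Big\}
$$

where the first term is the marginalization prior, the second sums IMU pre-integration residuals over consecutive frames, the third sums robustified reprojection residuals over feature observations, and $\rho(\cdot)$ is the Cauchy loss used below. The relocalization residuals are additional reprojection terms that tie window features to the loop-closure pose relo_Pose. The full optimization() is shown below.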
void Estimator::optimization()
{
ceres::Problem problem;
ceres::LossFunction *loss_function;
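// robust kernel applied to the visual (reprojection) residuals; Cauchy down-weights outlier feature matches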
//loss_function = new ceres::HuberLoss(1.0);
loss_function = new ceres::CauchyLoss(1.0);
//Add Ceres parameter blocks: poses, velocities/biases, camera-IMU extrinsics, and the time offset
for (int i = 0; i < WINDOW_SIZE + 1; i++)
{
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(para_Pose[i], SIZE_POSE, local_parameterization);
problem.AddParameterBlock(para_SpeedBias[i], SIZE_SPEEDBIAS);
}
for (int i = 0; i < NUM_OF_CAM; i++)
{
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(para_Ex_Pose[i], SIZE_POSE, local_parameterization);
if (!ESTIMATE_EXTRINSIC)
{
ROS_DEBUG("fix extinsic param");
problem.SetParameterBlockConstant(para_Ex_Pose[i]);
}
else
ROS_DEBUG("estimate extinsic param");
}
if (ESTIMATE_TD)
{
problem.AddParameterBlock(para_Td[0], 1);
//problem.SetParameterBlockConstant(para_Td[0]);
}
TicToc t_whole, t_prepare;
//Convert the state into raw arrays for Ceres: Ps,Rs -> para_Pose; Vs,Bas,Bgs -> para_SpeedBias; tic,ric -> para_Ex_Pose
vector2double();
//Add the prior residual from the last marginalization (its construction is covered in the marginalization part below)
if (last_marginalization_info)
{
// construct new marginalization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
problem.AddResidualBlock(marginalization_factor, NULL,
last_marginalization_parameter_blocks);
}
//Add IMU pre-integration residuals between consecutive frames
for (int i = 0; i < WINDOW_SIZE; i++)
{
int j = i + 1;
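// skip pre-integrations spanning more than 10 s: the accumulated integration error over such a gap would be too large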
if (pre_integrations[j]->sum_dt > 10.0)
continue;
IMUFactor* imu_factor = new IMUFactor(pre_integrations[j]);
problem.AddResidualBlock(imu_factor, NULL, para_Pose[i], para_SpeedBias[i], para_Pose[j], para_SpeedBias[j]);
}
int f_m_cnt = 0;
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
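// only use features tracked in at least 2 frames whose first observation is not in the last two frames of the window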
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
for (auto &it_per_frame : it_per_id.feature_per_frame)
{
imu_j++;
if (imu_i == imu_j)
{
continue;
}
Vector3d pts_j = it_per_frame.point;
//Add the reprojection residual; when the time offset TD is estimated, use the td-aware factor
if (ESTIMATE_TD)
{
ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity,
it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td,
it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());
problem.AddResidualBlock(f_td, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]);
/*
double **para = new double *[5];
para[0] = para_Pose[imu_i];
para[1] = para_Pose[imu_j];
para[2] = para_Ex_Pose[0];
para[3] = para_Feature[feature_index];
para[4] = para_Td[0];
f_td->check(para);
*/
}
else
{
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
problem.AddResidualBlock(f, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]);
}
f_m_cnt++;
}
}
ROS_DEBUG("visual measurement count: %d", f_m_cnt);
ROS_DEBUG("prepare for ceres: %f", t_prepare.toc());
// Add relocalization (loop-closure) residuals
if(relocalization_info)
{
//printf("set relocalization factor! \n");
ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
problem.AddParameterBlock(relo_Pose, SIZE_POSE, local_parameterization);
int retrive_feature_index = 0;
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int start = it_per_id.start_frame;
if(start <= relo_frame_local_index)
{
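// match_points (from the relocalization message) are assumed sorted by feature id; advance until the ids line up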
while((int)match_points[retrive_feature_index].z() < it_per_id.feature_id)
{
retrive_feature_index++;
}
if((int)match_points[retrive_feature_index].z() == it_per_id.feature_id)
{
Vector3d pts_j = Vector3d(match_points[retrive_feature_index].x(), match_points[retrive_feature_index].y(), 1.0);
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
problem.AddResidualBlock(f, loss_function, para_Pose[start], relo_Pose, para_Ex_Pose[0], para_Feature[feature_index]);
retrive_feature_index++;
}
}
}
}
ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
//options.num_threads = 2;
options.trust_region_strategy_type = ceres::DOGLEG;
options.max_num_iterations = NUM_ITERATIONS;
//options.use_explicit_schur_complement = true;
//options.minimizer_progress_to_stdout = true;
//options.use_nonmonotonic_steps = true;
if (marginalization_flag == MARGIN_OLD)
options.max_solver_time_in_seconds = SOLVER_TIME * 4.0 / 5.0;
else
options.max_solver_time_in_seconds = SOLVER_TIME;
TicToc t_solver;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
//cout << summary.BriefReport() << endl;
ROS_DEBUG("Iterations : %d", static_cast<int>(summary.iterations.size()));
ROS_DEBUG("solver costs: %f", t_solver.toc());
double2vector();
//After optimization, perform marginalization; for background see [this blog post](https://blog.csdn.net/heyijia0327/article/details/53707261)
TicToc t_whole_marginalization;
if (marginalization_flag == MARGIN_OLD)
{
MarginalizationInfo *marginalization_info = new MarginalizationInfo();
vector2double();
if (last_marginalization_info)
{
vector<int> drop_set;
for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
{
if (last_marginalization_parameter_blocks[i] == para_Pose[0] ||
last_marginalization_parameter_blocks[i] == para_SpeedBias[0])
drop_set.push_back(i);
}
// construct new marginalization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL,
last_marginalization_parameter_blocks,
drop_set);
marginalization_info->addResidualBlockInfo(residual_block_info);
}
{
if (pre_integrations[1]->sum_dt < 10.0)
{
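// IMU factor between frames 0 and 1; drop indices {0, 1} mark para_Pose[0] and para_SpeedBias[0] as the states to marginalize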
IMUFactor* imu_factor = new IMUFactor(pre_integrations[1]);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(imu_factor, NULL,
vector<double *>{para_Pose[0], para_SpeedBias[0], para_Pose[1], para_SpeedBias[1]},
vector<int>{0, 1});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
}
{
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{
it_per_id.used_num = it_per_id.feature_per_frame.size();
if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
continue;
++feature_index;
int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
if (imu_i != 0)
continue;
Vector3d pts_i = it_per_id.feature_per_frame[0].point;
for (auto &it_per_frame : it_per_id.feature_per_frame)
{
imu_j++;
if (imu_i == imu_j)
continue;
Vector3d pts_j = it_per_frame.point;
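// for both factor types below, drop indices {0, 3} mark para_Pose[imu_i] (imu_i == 0 here) and the feature's inverse depth for marginalization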
if (ESTIMATE_TD)
{
ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity,
it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td,
it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f_td, loss_function,
vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]},
vector<int>{0, 3});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
else
{
ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f, loss_function,
vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]},
vector<int>{0, 3});
marginalization_info->addResidualBlockInfo(residual_block_info);
}
}
}
}
TicToc t_pre_margin;
marginalization_info->preMarginalize();
ROS_DEBUG("pre marginalization %f ms", t_pre_margin.toc());
TicToc t_margin;
marginalization_info->marginalize();
ROS_DEBUG("marginalization %f ms", t_margin.toc());
std::unordered_map<long, double *> addr_shift;
for (int i = 1; i <= WINDOW_SIZE; i++)
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
}
for (int i = 0; i < NUM_OF_CAM; i++)
addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
if (ESTIMATE_TD)
{
addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
}
vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);
if (last_marginalization_info)
delete last_marginalization_info;
last_marginalization_info = marginalization_info;
last_marginalization_parameter_blocks = parameter_blocks;
}
else
{
//If the second-newest frame is not a keyframe, its visual measurements are discarded (marginalized out) while its IMU measurements stay merged into the sliding window. (The remaining steps mirror the MARGIN_OLD case.)
if (last_marginalization_info &&
std::count(std::begin(last_marginalization_parameter_blocks), std::end(last_marginalization_parameter_blocks), para_Pose[WINDOW_SIZE - 1]))
{
MarginalizationInfo *marginalization_info = new MarginalizationInfo();
vector2double();
if (last_marginalization_info)
{
vector<int> drop_set;
for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
{
ROS_ASSERT(last_marginalization_parameter_blocks[i] != para_SpeedBias[WINDOW_SIZE - 1]);
if (last_marginalization_parameter_blocks[i] == para_Pose[WINDOW_SIZE - 1])
drop_set.push_back(i);
}
// construct new marginalization_factor
MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL,
last_marginalization_parameter_blocks,
drop_set);
marginalization_info->addResidualBlockInfo(residual_block_info);
}
TicToc t_pre_margin;
ROS_DEBUG("begin marginalization");
marginalization_info->preMarginalize();
ROS_DEBUG("end pre marginalization, %f ms", t_pre_margin.toc());
TicToc t_margin;
ROS_DEBUG("begin marginalization");
marginalization_info->marginalize();
ROS_DEBUG("end marginalization, %f ms", t_margin.toc());
std::unordered_map<long, double *> addr_shift;
for (int i = 0; i <= WINDOW_SIZE; i++)
{
if (i == WINDOW_SIZE - 1)
continue;
else if (i == WINDOW_SIZE)
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
}
else
{
addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i];
addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i];
}
}
for (int i = 0; i < NUM_OF_CAM; i++)
addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
if (ESTIMATE_TD)
{
addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
}
vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);
if (last_marginalization_info)
delete last_marginalization_info;
last_marginalization_info = marginalization_info;
last_marginalization_parameter_blocks = parameter_blocks;
}
}
ROS_DEBUG("whole marginalization costs: %f", t_whole_marginalization.toc());
ROS_DEBUG("whole time for ceres: %f", t_whole.toc());
}
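What marginalize() computes is, in outline, a Schur complement on the linearized system assembled from the residual blocks collected above (generic notation here, not symbols from the code). Splitting the normal equations into the blocks to be marginalized ($x_m$) and the blocks to be kept ($x_r$):

$$
\begin{bmatrix} \Lambda_{mm} & \Lambda_{mr} \\ \Lambda_{rm} & \Lambda_{rr} \end{bmatrix}
\begin{bmatrix} \delta x_m \\ \delta x_r \end{bmatrix}
=
\begin{bmatrix} g_m \\ g_r \end{bmatrix}
\quad\Longrightarrow\quad
\Lambda^{*} = \Lambda_{rr} - \Lambda_{rm}\Lambda_{mm}^{-1}\Lambda_{mr},\qquad
g^{*} = g_r - \Lambda_{rm}\Lambda_{mm}^{-1}\,g_m .
$$

The reduced $(\Lambda^{*}, g^{*})$ is the prior on the remaining states; VINS-Mono recovers an equivalent linearized Jacobian and residual from it (via an eigendecomposition) and wraps them in the MarginalizationFactor that is added back as the first residual block in the next call to optimization().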
Sliding-window update
- When the oldest frame is removed, every state array (Ps, Rs, Vs, biases, buffers, ...) is shifted forward: slot i swaps with slot i+1, rotating the oldest frame's data to the end; the slot at WINDOW_SIZE is then reinitialized (state copied from WINDOW_SIZE - 1, pre-integration and buffers reset).
- When the second-newest frame is removed, the newest frame's IMU measurements are appended to the second-newest pre-integration, its state is copied into that slot, and the newest slot is then cleared.
void Estimator::slideWindow()
{
TicToc t_margin;
if (marginalization_flag == MARGIN_OLD)
{
back_R0 = Rs[0];
back_P0 = Ps[0];
if (frame_count == WINDOW_SIZE)
{
for (int i = 0; i < WINDOW_SIZE; i++)
{
Rs[i].swap(Rs[i + 1]);
std::swap(pre_integrations[i], pre_integrations[i + 1]);
dt_buf[i].swap(dt_buf[i + 1]);
linear_acceleration_buf[i].swap(linear_acceleration_buf[i + 1]);
angular_velocity_buf[i].swap(angular_velocity_buf[i + 1]);
Headers[i] = Headers[i + 1];
Ps[i].swap(Ps[i + 1]);
Vs[i].swap(Vs[i + 1]);
Bas[i].swap(Bas[i + 1]);
Bgs[i].swap(Bgs[i + 1]);
}
Headers[WINDOW_SIZE] = Headers[WINDOW_SIZE - 1];
Ps[WINDOW_SIZE] = Ps[WINDOW_SIZE - 1];
Vs[WINDOW_SIZE] = Vs[WINDOW_SIZE - 1];
Rs[WINDOW_SIZE] = Rs[WINDOW_SIZE - 1];
Bas[WINDOW_SIZE] = Bas[WINDOW_SIZE - 1];
Bgs[WINDOW_SIZE] = Bgs[WINDOW_SIZE - 1];
delete pre_integrations[WINDOW_SIZE];
pre_integrations[WINDOW_SIZE] = new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE]};
dt_buf[WINDOW_SIZE].clear();
linear_acceleration_buf[WINDOW_SIZE].clear();
angular_velocity_buf[WINDOW_SIZE].clear();
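// note: the `true ||` makes this branch unconditional, so frames older than the slid-out frame are always erased from all_image_frame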
if (true || solver_flag == INITIAL)
{
double t_0 = Headers[0].stamp.toSec();
map<double, ImageFrame>::iterator it_0;
it_0 = all_image_frame.find(t_0);
delete it_0->second.pre_integration;
all_image_frame.erase(all_image_frame.begin(), it_0);
}
slideWindowOld();
}
}
else
{
if (frame_count == WINDOW_SIZE)
{
for (unsigned int i = 0; i < dt_buf[frame_count].size(); i++)
{
double tmp_dt = dt_buf[frame_count][i];
Vector3d tmp_linear_acceleration = linear_acceleration_buf[frame_count][i];
Vector3d tmp_angular_velocity = angular_velocity_buf[frame_count][i];
pre_integrations[frame_count - 1]->push_back(tmp_dt, tmp_linear_acceleration, tmp_angular_velocity);
dt_buf[frame_count - 1].push_back(tmp_dt);
linear_acceleration_buf[frame_count - 1].push_back(tmp_linear_acceleration);
angular_velocity_buf[frame_count - 1].push_back(tmp_angular_velocity);
}
Headers[frame_count - 1] = Headers[frame_count];
Ps[frame_count - 1] = Ps[frame_count];
Vs[frame_count - 1] = Vs[frame_count];
Rs[frame_count - 1] = Rs[frame_count];
Bas[frame_count - 1] = Bas[frame_count];
Bgs[frame_count - 1] = Bgs[frame_count];
delete pre_integrations[WINDOW_SIZE];
pre_integrations[WINDOW_SIZE] = new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE]};
dt_buf[WINDOW_SIZE].clear();
linear_acceleration_buf[WINDOW_SIZE].clear();
angular_velocity_buf[WINDOW_SIZE].clear();
slideWindowNew();
}
}
}
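For completeness, the two helpers called at the end of slideWindow() do the corresponding feature bookkeeping. The sketch below is paraphrased from the released VINS-Mono code and may differ in detail from the version quoted above:

void Estimator::slideWindowNew()
{
    sum_of_front++;
    // the second-newest frame is dropped: remove its feature observations
    f_manager.removeFront(frame_count);
}

void Estimator::slideWindowOld()
{
    sum_of_back++;
    bool shift_depth = (solver_flag == NON_LINEAR);
    if (shift_depth)
    {
        // re-anchor each feature's inverse depth from the removed oldest frame
        // (back_R0 / back_P0, saved before the shift) to the new oldest frame (Rs[0] / Ps[0])
        Matrix3d R0 = back_R0 * ric[0], R1 = Rs[0] * ric[0];
        Vector3d P0 = back_P0 + back_R0 * tic[0], P1 = Ps[0] + Rs[0] * tic[0];
        f_manager.removeBackShiftDepth(R0, P0, R1, P1);
    }
    else
        f_manager.removeBack();
}

This is why back_R0 / back_P0 are saved at the top of the MARGIN_OLD branch: they hold the pose of the frame being discarded, which is needed to transfer feature depths to the new anchor frame.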