Map-point reprojection and feature alignment for the current frame

  // map reprojection & feature alignment
SVO_START_TIMER("reproject");
reprojector_.reprojectMap(new_frame_, overlap_kfs_);
SVO_STOP_TIMER("reproject");

In processFrame, after the initial sparse image alignment, the map is reprojected into the new frame and features are aligned, adding new feature observations to the current image; the entry point is reprojectMap.
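For context, a minimal sketch of how processFrame typically consumes the result (hedged: member names such as n_matches_ and Config::qualityMinFts follow the SVO convention and may differ slightly in PL-SVO):

  // sketch, assuming SVO-style members; not verbatim PL-SVO code
  reprojector_.reprojectMap(new_frame_, overlap_kfs_);
  const size_t repr_n_new_references = reprojector_.n_matches_;
  if(repr_n_new_references < Config::qualityMinFts())
  {
    new_frame_->T_f_w_ = last_frame_->T_f_w_; // reset pose to avoid jumps
    tracking_quality_ = TRACKING_INSUFFICIENT;
    return RESULT_FAILURE;
  }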

Reprojector reprojector_; // projects points from the other keyframes into the current frame

FramePtr new_frame_;  // current frame
vector< pair<FramePtr,size_t> > overlap_kfs_;  // all keyframes with overlapping field of view; the paired number gives the count of common map points observed
void Reprojector::reprojectMap(
FramePtr frame,
std::vector< std::pair<FramePtr,std::size_t> >& overlap_kfs)
{
resetReprojGrid(); // reset the reprojection grid
  // Identify those Keyframes which share a common field of view.
SVO_START_TIMER("reproject_kfs");
list< pair<FramePtr,double> > close_kfs;
map_.getCloseKeyframes(frame, close_kfs);

void Map::getCloseKeyframes(
const FramePtr& frame,
std::list< std::pair<FramePtr,double> >& close_kfs);

const FramePtr& frame;    // the current image frame

std::list< std::pair<FramePtr,double> >& close_kfs;  // output: keyframes close to the current frame, paired with their distance to it
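A sketch of what getCloseKeyframes does in SVO (hedged: PL-SVO may differ in detail): every keyframe whose key points are visible from the current frame is reported together with the translational distance between the two frames.

// sketch following SVO's Map::getCloseKeyframes; not verbatim PL-SVO code
void Map::getCloseKeyframes(
    const FramePtr& frame,
    std::list< std::pair<FramePtr,double> >& close_kfs) const
{
  for(auto kf : keyframes_)
  {
    // check for an overlapping field of view via the keyframe's key points
    for(auto keypoint : kf->key_pts_)
    {
      if(keypoint == nullptr)
        continue;
      if(frame->isVisible(keypoint->feat3D->pos_))
      {
        close_kfs.push_back(std::make_pair(
            kf, (frame->T_f_w_.translation() - kf->T_f_w_.translation()).norm()));
        break; // one visible key point is enough
      }
    }
  }
}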

The overlapping keyframes are then sorted by proximity (the second value of each pair):
  close_kfs.sort(boost::bind(&std::pair<FramePtr, double>::second, _1) <
boost::bind(&std::pair<FramePtr, double>::second, _2));
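The boost::bind expression simply compares the pairs' second members; with C++11 an equivalent (and arguably clearer) lambda would be:

  close_kfs.sort([](const std::pair<FramePtr,double>& a,
                    const std::pair<FramePtr,double>& b)
                 { return a.second < b.second; });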

Next, all map features of the closest N keyframes with overlap are reprojected:

  size_t n_kfs = 0;
overlap_kfs.reserve(options_.max_n_kfs);
for(auto it_frame=close_kfs.begin(), ite_frame=close_kfs.end();
it_frame!=ite_frame && n_kfs<options_.max_n_kfs; ++it_frame, ++n_kfs)
{
FramePtr ref_frame = it_frame->first;
// add the current frame to the (output) list of keyframes with overlap
// initialize the counter of candidates from this frame (2nd element in pair) to zero
overlap_kfs.push_back(pair<FramePtr,size_t>(ref_frame,0));
// Consider for candidate each mappoint in the ref KF that the current (input) KF observes
// We only store in which grid cell the points fall.
// Add each corresponding valid new Candidate to its cell in the grid.
int num_pt_success = setKfCandidates( frame, ref_frame->pt_fts_ );
overlap_kfs.back().second += num_pt_success;
// Add each line segment in the ref KF that the current (input) KF observes
int num_seg_success = setKfCandidates( frame, ref_frame->seg_fts_ );
overlap_kfs.back().second += num_seg_success;
}
SVO_STOP_TIMER("reproject_kfs");
Every map point in the reference keyframe that is also observed by the current (input) frame is considered as a candidate. We only record which grid cell each point falls into, and each valid new Candidate is appended to its cell in the grid.

template<class FeatureT>
int Reprojector::setKfCandidates(FramePtr frame, list<FeatureT*> fts)
{
int candidate_counter = 0;
for(auto it=fts.begin(), ite_ftr=fts.end(); it!=ite_ftr; ++it)
{
// check if the feature has a 3D object assigned
if((*it)->feat3D == NULL)
continue;
// make sure we project a point only once
if((*it)->feat3D->last_projected_kf_id_ == frame->id_)
continue;
(*it)->feat3D->last_projected_kf_id_ = frame->id_;
if(reproject(frame, (*it)->feat3D))
// increment the number of candidates taken successfully
candidate_counter++;
}
return candidate_counter;
}

int Reprojector::setKfCandidates(FramePtr frame, list<FeatureT*> fts)

setKfCandidates stores the (map point, projected pixel) candidate pairs for the current frame in grid_.cells.

 
bool Reprojector::reproject(FramePtr frame, Point* point)
{
// get position in current frame image of the world 3D point
Vector2d cur_px(frame->w2c(point->pos_));
if(frame->cam_->isInFrame(cur_px.cast<int>(), 8)) // 8px is the patch size in the matcher
{
// get linear index (wrt row-wise vectorized grid matrix)
// of the image grid cell in which the point px lies
const int k = static_cast<int>(cur_px[1]/grid_.cell_size)*grid_.grid_n_cols
+ static_cast<int>(cur_px[0]/grid_.cell_size);
grid_.cells.at(k)->push_back(PointCandidate(point, cur_px));
return true;
}
return false;
}
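frame->w2c chains the rigid-body transform into the camera model; in SVO it is essentially the following inline helper (hedged sketch, PL-SVO should be equivalent):

// sketch of Frame::w2c as in SVO
inline Vector2d Frame::w2c(const Vector3d& xyz_w) const
{
  return cam_->world2cam(T_f_w_ * xyz_w); // world -> camera frame -> pixel coordinates
}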

Definitions of the data types involved:

  struct PointCandidate {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Point* pt; //!< 3D point.
Vector2d px; //!< projected 2D pixel location.
PointCandidate(Point* pt, Vector2d& px) : pt(pt), px(px) {}
};
  // A cell is just a list of Reprojector::PointCandidate
typedef std::list<PointCandidate, aligned_allocator<PointCandidate> > Cell;
typedef std::vector<Cell*> CandidateGrid;

struct LineCandidate {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
LineSeg* ls; //!< 3D line segment.
Vector2d spx; //!< projected 2D start point.
Vector2d epx; //!< projected 2D end point.
LineCandidate(LineSeg* ls, Vector2d& spx, Vector2d& epx) : ls(ls), spx(spx), epx(epx) {}
};
  /// The candidate segments are collected in a single list for the whole image.
/// There is no clear heuristic about how to discretize the image space for the segment case.
typedef std::list<LineCandidate, aligned_allocator<LineCandidate> > LineCandidates;
typedef std::vector<LineCandidates*> LineCandidateGrid;

/// The grid stores a set of candidate matches. For every grid cell we try to find one match.
struct Grid
{
CandidateGrid cells;
vector<int> cell_order;
int cell_size;
int grid_n_cols;
int grid_n_rows;
};

struct GridLs
{
LineCandidateGrid cells;
vector<int> cell_order;
int cell_size;
int grid_n_cols;
int grid_n_rows;
};

Grid grid_;
GridLs gridls_;
Matcher matcher_;
Map& map_;
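resetReprojGrid, called at the top of reprojectMap, clears the cells and re-randomizes the visit order. A hedged sketch of the grid setup, following SVO's initializeGrid (cam and exact names are assumptions, not verified PL-SVO code):

// sketch of the grid initialization, SVO-style
grid_.cell_size = Config::gridSize();
grid_.grid_n_cols = std::ceil(static_cast<double>(cam->width()) / grid_.cell_size);
grid_.grid_n_rows = std::ceil(static_cast<double>(cam->height()) / grid_.cell_size);
grid_.cells.resize(grid_.grid_n_cols * grid_.grid_n_rows);
for(Cell*& c : grid_.cells)
  c = new Cell;                        // one empty candidate list per cell
grid_.cell_order.resize(grid_.cells.size());
for(size_t i = 0; i < grid_.cells.size(); ++i)
  grid_.cell_order[i] = i;
std::random_shuffle(grid_.cell_order.begin(), grid_.cell_order.end()); // random visit order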

Now all map candidates (created from converged seeds) are projected as well.

Point candidates:

  // Now project all map candidates
// (candidates in the map are created from converged seeds)
SVO_START_TIMER("reproject_candidates");
// Point candidates
// (same logic as above to populate the cell grid but taking candidate points from the map object)
setMapCandidates(frame, map_.point_candidates_);

Segment candidates:

  // Segment candidates
setMapCandidates(frame, map_.segment_candidates_);
SVO_STOP_TIMER("reproject_candidates");

Map&    map_;  // the map

MapPointCandidates point_candidates_;   // container of converged 3D points that are not yet assigned to two keyframes

MapSegmentCandidates segment_candidates_;    // container of converged 3D line segments that are not yet assigned to two keyframes

class MapPointCandidates
{
public:
typedef pair<Point*, PointFeat*> PointCandidate;
typedef list<PointCandidate> PointCandidateList;

/// The depth-filter is running in a parallel thread and fills the candidate list.
/// This mutex controls concurrent access to point_candidates.
boost::mutex mut_;

/// Candidate points are created from converged seeds.
/// Until the next keyframe, these points can be used for reprojection and pose optimization.
PointCandidateList candidates_;
list< Point* > trash_points_;

MapPointCandidates();
~MapPointCandidates();

/// Add a candidate point.
void newCandidatePoint(Point* point, double depth_sigma2);
/// Adds the feature to the frame and deletes candidate from list.
void addCandidatePointToFrame(FramePtr frame);
/// Remove a candidate point from the list of candidates.
bool deleteCandidatePoint(Point* point);
/// Remove all candidates that belong to a frame.
void removeFrameCandidates(FramePtr frame);
/// Reset the candidate list, remove and delete all points.
void reset();

void deleteCandidate(PointCandidate& c);
void emptyTrash();
};
void MapPointCandidates::deleteCandidate(PointCandidate& c)
{
// camera-rig: another frame might still be pointing to the candidate point
// therefore, we can't delete it right now.
delete c.second;
c.second = NULL;
c.first->type_ = Point::TYPE_DELETED;
trash_points_.push_back(c.first);
}
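The counterpart that fills this list from the depth filter is newCandidatePoint; in SVO it is essentially the following (hedged sketch; the PointFeat cast and obs_ naming follow this document and are assumptions):

// sketch following SVO's MapPointCandidates::newCandidatePoint; not verbatim PL-SVO code
void MapPointCandidates::newCandidatePoint(Point* point, double depth_sigma2)
{
  point->type_ = Point::TYPE_CANDIDATE;
  boost::unique_lock<boost::mutex> lock(mut_); // depth filter runs in a parallel thread
  candidates_.push_back(PointCandidate(point, static_cast<PointFeat*>(point->obs_.front())));
}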

template<class MapCandidatesT>
void Reprojector::setMapCandidates(FramePtr frame, MapCandidatesT &map_candidates)
{
boost::unique_lock<boost::mutex> lock(map_candidates.mut_); // the mutex will be unlocked when out of scope
auto it=map_candidates.candidates_.begin();
while(it!=map_candidates.candidates_.end())
{
if(!reproject(frame, it->first))
{
// if the reprojection of the map candidate point failed,
// increment the counter of failed reprojections (assess the point quality)
it->first->n_failed_reproj_ += 3;
if(it->first->n_failed_reproj_ > 30)
{
// if the reprojection failed too many times, remove the map candidate point
map_candidates.deleteCandidate(*it);
it = map_candidates.candidates_.erase(it);
continue;
}
}
++it;
} // end-while-loop
}

Now we go through every grid cell and select at most one point to match, so that in the end there is at most one reprojected point per cell:


  SVO_START_TIMER("feature_align");
for(size_t i=0; i<grid_.cells.size(); ++i)
{
// we prefer good quality points over unknown quality (more likely to match)
// and unknown quality over candidates (position not optimized)
// we use the random cell order to visit cells uniformly on the grid
if(refineBestCandidate(*grid_.cells.at(grid_.cell_order[i]), frame))
++n_matches_;
if(n_matches_ > (size_t) Config::maxFts())
break; // the random visit order of cells assures uniform distribution
// of the features even if we break early (maxFts reached soon)
}

We prefer points of good quality over points of unknown quality (they are more likely to match), and unknown-quality points over candidates (whose position has not been optimized yet). The random cell order makes the visits uniform over the grid.
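This preference is implemented by sorting each cell with pointQualityComparator, which just compares the Point type enum (ordered so that TYPE_GOOD > TYPE_UNKNOWN > TYPE_CANDIDATE > TYPE_DELETED). A hedged sketch following SVO:

// sketch following SVO; relies on the ordering of the Point type enum
bool Reprojector::pointQualityComparator(PointCandidate& lhs, PointCandidate& rhs)
{
  return lhs.pt->type_ > rhs.pt->type_; // better-quality types compare greater
}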


bool Reprojector::refineBestCandidate(Cell& cell, FramePtr frame)
{
// sort the candidates inside the cell according to their quality
cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2));
Cell::iterator it=cell.begin();
// in principle, iterate through the whole list of features in the cell
// in reality, there is maximum one point per cell, so the loop returns if successful
while(it!=cell.end())
{
++n_trials_;
// Try to refine the point feature in frame from current initial estimate
bool success = refine( it->pt, it->px, frame );
// Whether it failed or not, this candidate is erased here (as in the original code)
it = cell.erase(it); // it takes next position in the list as output of .erase
if(success)
// Maximum one point per cell.
return true;
}
return false;
}

refineBestCandidate calls refine, which tries to refine the point feature in the frame starting from the current initial estimate:

bool Reprojector::refine(Point* pt, Vector2d& px_est, FramePtr frame)
{
if(pt->type_ == Point::TYPE_DELETED)
return false;

bool found_match = true;
if(options_.find_match_direct)
// refine px position in the candidate by directly applying subpix refinement
// internally, it is optimizing photometric error
// of the candidate px patch wrt the closest-view reference feature patch
found_match = matcher_.findMatchDirect(*pt, *frame, px_est);
// TODO: What happens if options_.find_match_direct is false??? Shouldn't findEpipolarMatchDirect be here?

// apply quality logic
{
if(!found_match)
{
// if there is no match found for this point, decrease quality
pt->n_failed_reproj_++;
// consider removing the point from map depending on point type and quality
if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_failed_reproj_ > 15)
map_.safeDeletePoint(pt);
if(pt->type_ == Point::TYPE_CANDIDATE && pt->n_failed_reproj_ > 30)
map_.point_candidates_.deleteCandidatePoint(pt);
return false;
}
// if there is a successful match found for this point, increase quality
pt->n_succeeded_reproj_++;
if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_succeeded_reproj_ > 10)
pt->type_ = Point::TYPE_GOOD;
}

// create new point feature for this frame with the refined (aligned) candidate position in this image
PointFeat* new_feature = new PointFeat(frame.get(), px_est, matcher_.search_level_);
frame->addFeature(new_feature);

// Here we add a reference in the feature to the 3D point, the other way
// round is only done if this frame is selected as keyframe.
// TODO: why not give it directly to the constructor PointFeat(frame.get(), pt, it->px, matcher_.search_level_)
new_feature->feat3D = pt;

PointFeat* pt_ftr = static_cast<PointFeat*>( matcher_.ref_ftr_ );
if(pt_ftr != NULL)
{
if(pt_ftr->type == PointFeat::EDGELET)
{
new_feature->type = PointFeat::EDGELET;
new_feature->grad = matcher_.A_cur_ref_*pt_ftr->grad;
new_feature->grad.normalize();
}
}

// If the keyframe is selected and we reproject the rest, we don't have to
// check this point anymore.
// it = cell.erase(it); // Maximum one point per cell.
return true;
}

findMatchDirect refines the pixel position of the candidate by applying subpixel refinement directly: internally it optimizes the photometric error of the candidate pixel patch with respect to the reference feature patch from the closest view.
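Its first step is to fetch the observation whose viewing direction is closest to the current one. A hedged sketch of Point::getCloseViewObs as it appears in SVO (PL-SVO should be analogous):

// sketch following SVO's Point::getCloseViewObs; not verbatim PL-SVO code
bool Point::getCloseViewObs(const Vector3d& framepos, Feature*& ftr) const
{
  Vector3d obs_dir(framepos - pos_);
  obs_dir.normalize();
  auto min_it = obs_.begin();
  double min_cos_angle = 0;
  for(auto it = obs_.begin(), ite = obs_.end(); it != ite; ++it)
  {
    Vector3d dir((*it)->frame->pos() - pos_);
    dir.normalize();
    double cos_angle = obs_dir.dot(dir);
    if(cos_angle > min_cos_angle)
    {
      min_cos_angle = cos_angle;
      min_it = it;
    }
  }
  ftr = *min_it;
  if(min_cos_angle < 0.5) // assume that observations at more than 60 degrees are useless
    return false;
  return true;
}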

bool Matcher::findMatchDirect(
const Point& pt,
const Frame& cur_frame,
Vector2d& px_cur)
{
// get reference feature in closest view (frame)
if(!pt.getCloseViewObs(cur_frame.pos(), ref_ftr_))
return false;

if(!ref_ftr_->frame->cam_->isInFrame(
ref_ftr_->px.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level))
return false;

// warp affine
warp::getWarpMatrixAffine(
*ref_ftr_->frame->cam_, *cur_frame.cam_, ref_ftr_->px, ref_ftr_->f,
(ref_ftr_->frame->pos() - pt.pos_).norm(),
cur_frame.T_f_w_ * ref_ftr_->frame->T_f_w_.inverse(), ref_ftr_->level, A_cur_ref_);
search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
// is img of ref_frame fully available at any time? that means keeping stored previous images, for how long?
warp::warpAffine(A_cur_ref_, ref_ftr_->frame->img_pyr_[ref_ftr_->level], ref_ftr_->px,
ref_ftr_->level, search_level_, halfpatch_size_+1, patch_with_border_);
// patch_with_border_ stores the square patch (of pixel intensities) around the reference feature
// once the affine transformation is applied to the original reference image
// the border is necessary for gradient operations (intensities at the border must be precomputed by interpolation too!)
createPatchFromPatchWithBorder();

// px_cur should be set
Vector2d px_scaled(px_cur/(1<<search_level_));

bool success = false;
PointFeat* pt_ftr = static_cast<PointFeat*>(ref_ftr_);
if(pt_ftr->type == PointFeat::EDGELET)
{
Vector2d dir_cur(A_cur_ref_*pt_ftr->grad);
dir_cur.normalize();
success = feature_alignment::align1D(
cur_frame.img_pyr_[search_level_], dir_cur.cast<float>(),
patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
}
else
{
success = feature_alignment::align2D(
cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
options_.align_max_iter, px_scaled);
}
px_cur = px_scaled * (1<<search_level_);
return success;
}
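For reference, the affine warp between the two views is estimated by projecting the patch corners through both cameras; a hedged sketch of warp::getWarpMatrixAffine following SVO (not verbatim PL-SVO code):

// sketch following SVO's warp::getWarpMatrixAffine
void warp::getWarpMatrixAffine(
    const vk::AbstractCamera& cam_ref, const vk::AbstractCamera& cam_cur,
    const Vector2d& px_ref, const Vector3d& f_ref, const double depth_ref,
    const SE3& T_cur_ref, const int level_ref, Matrix2d& A_cur_ref)
{
  // back-project the reference pixel and two pixels offset along u and v
  const int halfpatch_size = 5;
  const Vector3d xyz_ref(f_ref * depth_ref);
  Vector3d xyz_du_ref(cam_ref.cam2world(px_ref + Vector2d(halfpatch_size,0)*(1<<level_ref)));
  Vector3d xyz_dv_ref(cam_ref.cam2world(px_ref + Vector2d(0,halfpatch_size)*(1<<level_ref)));
  xyz_du_ref *= xyz_ref[2]/xyz_du_ref[2]; // put the offset points at the same depth
  xyz_dv_ref *= xyz_ref[2]/xyz_dv_ref[2];
  // project all three into the current image and form the 2x2 Jacobian
  const Vector2d px_cur(cam_cur.world2cam(T_cur_ref*xyz_ref));
  const Vector2d px_du(cam_cur.world2cam(T_cur_ref*xyz_du_ref));
  const Vector2d px_dv(cam_cur.world2cam(T_cur_ref*xyz_dv_ref));
  A_cur_ref.col(0) = (px_du - px_cur)/halfpatch_size;
  A_cur_ref.col(1) = (px_dv - px_cur)/halfpatch_size;
}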

Feature refinement:

bool align2D(
const cv::Mat& cur_img,
uint8_t* ref_patch_with_border,
uint8_t* ref_patch,
const int n_iter,
Vector2d& cur_px_estimate,
bool no_simd)
{
#ifdef __ARM_NEON__
if(!no_simd)
return align2D_NEON(cur_img, ref_patch_with_border, ref_patch, n_iter, cur_px_estimate);
#endif

const int patch_size_ = 8;
Patch patch( patch_size_, cur_img );
bool converged=false;

/* Precomputation step: derivatives in reference patch */
// compute derivative of template and prepare inverse compositional
float __attribute__((__aligned__(16))) ref_patch_dx[patch.area];
float __attribute__((__aligned__(16))) ref_patch_dy[patch.area];
Matrix3f H; H.setZero();

// compute gradient and hessian
const int ref_step = patch_size_+2; // assumes ref_patch_with_border comes from a specific Mat object with certain size!!! Bad way to do it?
float* it_dx = ref_patch_dx;
float* it_dy = ref_patch_dy;
for(int y=0; y<patch_size_; ++y)
{
uint8_t* it = ref_patch_with_border + (y+1)*ref_step + 1;
for(int x=0; x<patch_size_; ++x, ++it, ++it_dx, ++it_dy)
{
Vector3f J;
J[0] = 0.5 * (it[1] - it[-1]);
J[1] = 0.5 * (it[ref_step] - it[-ref_step]);
J[2] = 1;
*it_dx = J[0];
*it_dy = J[1];
H += J*J.transpose();
}
}
Matrix3f Hinv = H.inverse();
float mean_diff = 0;

/* Iterative loop: residues and updates with patch in current image */
// Compute pixel location in new image:
float u = cur_px_estimate.x();
float v = cur_px_estimate.y();

// termination condition
const float min_update_squared = 0.03*0.03;
const int cur_step = cur_img.step.p[0];
// float chi2 = 0;
Vector3f update; update.setZero();
for(int iter = 0; iter<n_iter; ++iter)
{
// TODO very rarely this can happen, maybe H is singular? should not be at corner.. check
// if(isnan(cur_px_estimate[0]) || isnan(cur_px_estimate[1]))
//   return false;

// set patch position for current feature location
patch.setPosition( cur_px_estimate );
// abort the optimization if the patch does not fully lie within the image
if(!patch.isInFrame(patch.halfsize))
break;
// compute interpolation weights
patch.computeInterpWeights();
// set ROI in the current image to traverse
patch.setRoi();

// loop through search_patch, interpolate
uint8_t* it_ref = ref_patch;
float* it_ref_dx = ref_patch_dx;
float* it_ref_dy = ref_patch_dy;
uint8_t* ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full cur_img)
// float new_chi2 = 0.0;
Vector3f Jres; Jres.setZero();
for(int y=0; y<patch.size; ++y)
{
// get the pointer to first element in row y of the patch ROI
ptr = patch.roi.ptr(y);
for(int x=0; x<patch.size; ++x, ++ptr, ++it_ref, ++it_ref_dx, ++it_ref_dy)
{
float search_pixel = patch.wTL*ptr[0] + patch.wTR*ptr[1] + patch.wBL*ptr[cur_step] + patch.wBR*ptr[cur_step+1];
float res = search_pixel - *it_ref + mean_diff;
Jres[0] -= res*(*it_ref_dx);
Jres[1] -= res*(*it_ref_dy);
Jres[2] -= res;
// new_chi2 += res*res;
}
}

/*
if(iter > 0 && new_chi2 > chi2)
{
#if SUBPIX_VERBOSE
cout << "error increased." << endl;
#endif
u -= update[0];
v -= update[1];
break;
}
chi2 = new_chi2;
*/
update = Hinv * Jres;
u += update[0];
v += update[1];
cur_px_estimate = Vector2d(u,v);
mean_diff += update[2];

#if SUBPIX_VERBOSE
cout << "Iter " << iter << ":"
<< "\t u=" << u << ", v=" << v
<< "\t update = " << update[0] << ", " << update[1] << endl;
// << "\t new chi2 = " << new_chi2 << endl;
#endif

if(update[0]*update[0]+update[1]*update[1] < min_update_squared)
{
#if SUBPIX_VERBOSE
cout << "converged." << endl;
#endif
converged=true;
break;
}
}

cur_px_estimate << u, v;
return converged;
}
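The optimization is inverse compositional Gauss-Newton over three parameters (du, dv, and a brightness offset): the Jacobian J = [0.5*(I(x+1,y)-I(x-1,y)), 0.5*(I(x,y+1)-I(x,y-1)), 1] is precomputed on the reference patch, and each iteration solves update = Hinv * J^T * r. A minimal usage sketch (hypothetical values; the patch buffers are the 8x8 and 10x10 arrays produced by the matcher):

// hypothetical usage sketch of align2D; the buffers would be filled by
// warpAffine and createPatchFromPatchWithBorder as shown above
uint8_t patch_with_border[10*10]; // 8x8 patch plus a 1-pixel border for gradients
uint8_t patch[8*8];
Vector2d px_estimate(120.3, 75.8); // initial feature position at the search level
bool converged = feature_alignment::align2D(
    cur_frame.img_pyr_[search_level], patch_with_border, patch,
    /*n_iter=*/10, px_estimate);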

// create a new point feature for the current frame, with the refined (aligned) position in this image

  PointFeat* new_feature = new PointFeat(frame.get(), px_est, matcher_.search_level_);
frame->addFeature(new_feature);

Here we add a reference from the feature to the 3D point; the link in the other direction is only added if this frame is later selected as a keyframe.


  if(pt_ftr != NULL)
{
if(pt_ftr->type == PointFeat::EDGELET)
{
new_feature->type = PointFeat::EDGELET;
new_feature->grad = matcher_.A_cur_ref_*pt_ftr->grad;
new_feature->grad.normalize();
}
}

Finally, the same refinement is attempted for each line-segment candidate:

The random visit order of the cells ensures a uniform distribution of features even if we break early (once maxFtsSegs is reached):

  for(size_t i=0; i<gridls_.cells.size(); ++i)
{
if(refineBestCandidate(*gridls_.cells.at(gridls_.cell_order[i]), frame))
++n_ls_matches_;
if(n_ls_matches_ > (size_t) Config::maxFtsSegs())
break; // the random visit order of cells assures uniform distribution
// of the features even if we break early (maxFts reached soon)
}
/*for(auto it = lines_.begin(), ite = lines_.end(); it!=ite; ++it)
{
if(refine(it->ls,it->spx,it->epx,frame))
++n_ls_matches_;
if(n_ls_matches_ > (size_t) Config::maxFtsSegs())
break;
}*/
SVO_STOP_TIMER("feature_align");

This concludes the feature refinement step.


 
 
 
