Object detection with OpenCV: the AdaBoost + Haar decision process. Part of the relevant source code is listed below.

The bottom of the function call stack is:

1. Prediction with ordered decision stumps

template<class FEval>
inline int predictOrderedStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &_featureEvaluator, double& sum )
{
    int nodeOfs = 0, leafOfs = 0;
    FEval& featureEvaluator = (FEval&)*_featureEvaluator;
    float* cascadeLeaves = &cascade.data.leaves[0];
    CascadeClassifier::Data::DTreeNode* cascadeNodes = &cascade.data.nodes[0];
    CascadeClassifier::Data::Stage* cascadeStages = &cascade.data.stages[0];

    // evaluate the window stage by stage; in the author's first training run there were 19 stages (nstages = 19)
    int nstages = (int)cascade.data.stages.size();
    for( int stageIdx = 0; stageIdx < nstages; stageIdx++ )
    {
        CascadeClassifier::Data::Stage& stage = cascadeStages[stageIdx];
        sum = 0.0;
        int ntrees = stage.ntrees;   // number of trees (stumps) in this stage
        for( int i = 0; i < ntrees; i++, nodeOfs++, leafOfs += 2 )
        {
            CascadeClassifier::Data::DTreeNode& node = cascadeNodes[nodeOfs];
            // accumulate the stage sum (negative features are not represented explicitly?)
            double value = featureEvaluator(node.featureIdx);
            sum += cascadeLeaves[ value < node.threshold ? leafOfs : leafOfs + 1 ];
        }
        if( sum < stage.threshold )
            return -stageIdx;        // rejected at this stage
    }
    return 1;                        // passed every stage
}

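To make the stage logic above easier to follow, here is a minimal, self-contained sketch of the same idea with simplified stand-in types (Stump, Stage, and featureValue are hypothetical, not the real cv::CascadeClassifier::Data structures): each stump contributes one of its two leaf weights depending on a threshold test of a single feature, the weights are summed per stage, and the window is rejected as soon as one stage sum falls below the stage threshold.

#include <vector>
#include <cstddef>

struct Stump { int featureIdx; float threshold; float leftLeaf; float rightLeaf; };
struct Stage { std::vector<Stump> stumps; float stageThreshold; };

// featureValue(idx) plays the role of featureEvaluator(node.featureIdx) above.
template<class FeatureFn>
int evaluateWindow( const std::vector<Stage>& stages, FeatureFn featureValue )
{
    for( size_t s = 0; s < stages.size(); s++ )
    {
        double sum = 0.0;
        for( size_t i = 0; i < stages[s].stumps.size(); i++ )
        {
            const Stump& st = stages[s].stumps[i];
            double v = featureValue(st.featureIdx);
            // each stump adds either its left or its right leaf weight
            sum += ( v < st.threshold ) ? st.leftLeaf : st.rightLeaf;
        }
        if( sum < stages[s].stageThreshold )
            return -(int)s;          // rejected at stage s, like predictOrderedStump
    }
    return 1;                        // survived every stage: candidate window
}
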
2. The caller one level up: evaluating the cascade at a single point

int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& evaluator, Point pt, double& weight )
{
    CV_Assert( oldCascade.empty() );
    assert( data.featureType == FeatureEvaluator::HAAR ||
            data.featureType == FeatureEvaluator::LBP ||
            data.featureType == FeatureEvaluator::HOG );

    if( !evaluator->setWindow(pt) )
        return -1;

    if( data.isStumpBased )
    {
        // if Haar features are used, take the Haar detection path (wishchin)
        if( data.featureType == FeatureEvaluator::HAAR )
            return predictOrderedStump<HaarEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::LBP )
            return predictCategoricalStump<LBPEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::HOG )
            return predictOrderedStump<HOGEvaluator>( *this, evaluator, weight );
        else
            return -2;
    }
    else
    {
        if( data.featureType == FeatureEvaluator::HAAR )
            return predictOrdered<HaarEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::LBP )
            return predictCategorical<LBPEvaluator>( *this, evaluator, weight );
        else if( data.featureType == FeatureEvaluator::HOG )
            return predictOrdered<HOGEvaluator>( *this, evaluator, weight );
        else
            return -2;
    }
}

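As a reading aid, the return value of runAt() can be interpreted as follows: 1 means the window passed every stage and becomes a detection candidate; 0 or a negative value means rejection, where -result is the index of the stage that rejected the window (runAt() also returns -1 when setWindow() fails at the image border, and -2 for an unsupported feature type). The helper below is purely illustrative and is not part of the OpenCV API:

#include <sstream>
#include <string>

// Hypothetical helper: describe the value returned by CascadeClassifier::runAt().
std::string describeRunAtResult( int result, int nstages )
{
    std::ostringstream os;
    if( result == 1 )
        os << "passed all " << nstages << " stages (detection candidate)";
    else if( result == -2 )
        os << "unsupported feature type";
    else
        os << "rejected at stage " << -result << " of " << nstages;
    return os.str();
}
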
3. The CascadeClassifierInvoker, which wraps the CascadeClassifier

Each invoker derives from the parallel-loop body, i.e. class CascadeClassifierInvoker : public ParallelLoopBody, so that the sliding-window scan runs in parallel. Its operator() performs the detection over one strip of the image:

    void operator()(const Range& range) const
    {
        Ptr<FeatureEvaluator> evaluator = classifier->featureEvaluator->clone();

        Size winSize(cvRound(classifier->data.origWinSize.width * scalingFactor),
                     cvRound(classifier->data.origWinSize.height * scalingFactor));

        int y1 = range.start * stripSize;
        int y2 = min(range.end * stripSize, processingRectSize.height);
        for( int y = y1; y < y2; y += yStep )
        {
            for( int x = 0; x < processingRectSize.width; x += yStep )
            {
                if ( (!mask.empty()) && (mask.at<uchar>(Point(x,y)) == 0) )
                {
                    continue;
                }

                double gypWeight;
                // test whether the window anchored at this point contains the target (wishchin, 2017-03-20)
                int result = classifier->runAt(evaluator, Point(x, y), gypWeight);
#if defined (LOG_CASCADE_STATISTIC)
                logger.setPoint(Point(x, y), result);
#endif
                if( rejectLevels )
                {
                    if( result == 1 )
                        result = -(int)classifier->data.stages.size();
                    if( classifier->data.stages.size() + result < 4 )
                    {
                        mtx->lock();
                        rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
                                                   winSize.width, winSize.height));
                        rejectLevels->push_back(-result);
                        levelWeights->push_back(gypWeight);
                        mtx->unlock();
                    }
                }
                else if( result > 0 )
                {
                    mtx->lock();
                    rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
                                               winSize.width, winSize.height));
                    mtx->unlock();
                }
                if( result == 0 )
                    x += yStep;
            }
        }
    }

    CascadeClassifier* classifier;
    vector<Rect>* rectangles;
    Size processingRectSize;
    int stripSize, yStep;
    double scalingFactor;
    vector<int> *rejectLevels;
    vector<double> *levelWeights;
    Mat mask;
    Mutex* mtx;
};

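The pattern behind the invoker is OpenCV's cv::parallel_for_ plus a cv::ParallelLoopBody subclass: the framework splits a Range into chunks and calls operator() on each chunk from worker threads. A minimal sketch of that pattern with a toy body (SumRowsBody is hypothetical; only parallel_for_ and ParallelLoopBody come from the 2.4-era OpenCV core):

#include <opencv2/core/core.hpp>
#include <vector>

// Toy parallel body: sums each row of an image, one chunk of rows per worker,
// mirroring how CascadeClassifierInvoker handles one strip of scan rows per Range chunk.
class SumRowsBody : public cv::ParallelLoopBody
{
public:
    SumRowsBody( const cv::Mat& img, std::vector<double>& rowSums )
        : img_(img), rowSums_(rowSums) {}

    void operator()( const cv::Range& range ) const
    {
        for( int r = range.start; r < range.end; r++ )
            rowSums_[r] = cv::sum( img_.row(r) )[0];
    }

private:
    const cv::Mat& img_;
    std::vector<double>& rowSums_;
};

// usage:
//   std::vector<double> sums( img.rows );
//   cv::parallel_for_( cv::Range(0, img.rows), SumRowsBody(img, sums) );
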
4. detectSingleScale: computing detections at a single scale within the multi-scale process

bool CascadeClassifier::detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
                                           int stripSize, int yStep, double factor, vector<Rect>& candidates,
                                           vector<int>& levels, vector<double>& weights, bool outputRejectLevels )
{
    if( !featureEvaluator->setImage( image, data.origWinSize ) )
        return false;

#if defined (LOG_CASCADE_STATISTIC)
    logger.setImage(image);
#endif

    Mat currentMask;
    if (!maskGenerator.empty())
    {
        currentMask = maskGenerator->generateMask(image);
    }

    vector<Rect> candidatesVector;
    vector<int> rejectLevels;
    vector<double> levelWeights;
    Mutex mtx;
    if( outputRejectLevels )
    {
        parallel_for_(Range(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
            candidatesVector, rejectLevels, levelWeights, true, currentMask, &mtx));
        levels.insert( levels.end(), rejectLevels.begin(), rejectLevels.end() );
        weights.insert( weights.end(), levelWeights.begin(), levelWeights.end() );
    }
    else
    {
        // parallel scan: a CascadeClassifierInvoker is built for this scale and run over stripCount strips
        parallel_for_(Range(0, stripCount), CascadeClassifierInvoker( *this, processingRectSize, stripSize, yStep, factor,
            candidatesVector, rejectLevels, levelWeights, false, currentMask, &mtx));
    }
    candidates.insert( candidates.end(), candidatesVector.begin(), candidatesVector.end() );

#if defined (LOG_CASCADE_STATISTIC)
    logger.write();
#endif
    return true;
}

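detectSingleScale also shows the mask hook: if a maskGenerator is installed, its generateMask() result is passed to the invoker, and any scan position whose mask byte is 0 is skipped. Below is a hedged sketch of a custom generator that restricts the search to the top half of each scaled frame; generateMask/initializeMask follow the calls visible in the code above, while the setMaskGenerator() registration is my assumption about the 2.4-era API.

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/core/core.hpp>

// Hypothetical mask generator: scan only the upper half of each scaled image.
class UpperHalfMaskGenerator : public cv::CascadeClassifier::MaskGenerator
{
public:
    cv::Mat generateMask( const cv::Mat& src )
    {
        cv::Mat mask = cv::Mat::zeros( src.size(), CV_8U );
        mask( cv::Rect(0, 0, src.cols, src.rows / 2) ).setTo( 255 );   // non-zero = scan here
        return mask;
    }
    void initializeMask( const cv::Mat& /*src*/ ) {}
};

// assumed registration (2.4-era API):
//   cv::Ptr<cv::CascadeClassifier::MaskGenerator> mg( new UpperHalfMaskGenerator() );
//   cascade.setMaskGenerator( mg );
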
5. Multi-scale detection: detectMultiScale

void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
                                          vector<int>& rejectLevels,
                                          vector<double>& levelWeights,
                                          double scaleFactor, int minNeighbors,
                                          int flags, Size minObjectSize, Size maxObjectSize,
                                          bool outputRejectLevels )
{
    const double GROUP_EPS = 0.2;

    CV_Assert( scaleFactor > 1 && image.depth() == CV_8U );

    if( empty() )
        return;

    if( isOldFormatCascade() )
    {
        MemStorage storage(cvCreateMemStorage(0));
        CvMat _image = image;
        CvSeq* _objects = cvHaarDetectObjectsForROC( &_image, oldCascade, storage, rejectLevels, levelWeights, scaleFactor,
                                                     minNeighbors, flags, minObjectSize, maxObjectSize, outputRejectLevels );
        vector<CvAvgComp> vecAvgComp;
        Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);
        objects.resize(vecAvgComp.size());
        std::transform(vecAvgComp.begin(), vecAvgComp.end(), objects.begin(), getRect());
        return;
    }

    objects.clear();

    if (!maskGenerator.empty())
    {
        maskGenerator->initializeMask(image);
    }

    if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
        maxObjectSize = image.size();

    Mat grayImage = image;
    if( grayImage.channels() > 1 )
    {
        Mat temp;
        cvtColor(grayImage, temp, CV_BGR2GRAY);
        grayImage = temp;
    }

    Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);
    vector<Rect> candidates;

    for( double factor = 1; ; factor *= scaleFactor )
    {
        Size originalWindowSize = getOriginalWindowSize();
        Size windowSize( cvRound(originalWindowSize.width*factor), cvRound(originalWindowSize.height*factor) );
        Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
        Size processingRectSize( scaledImageSize.width - originalWindowSize.width,
                                 scaledImageSize.height - originalWindowSize.height );

        if( processingRectSize.width <= 0 || processingRectSize.height <= 0 )
            break;
        if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
            break;
        if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
            continue;

        Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
        resize( grayImage, scaledImage, scaledImageSize, 0, 0, CV_INTER_LINEAR );

        int yStep;
        if( getFeatureType() == cv::FeatureEvaluator::HOG )
        {
            yStep = 4;
        }
        else
        {
            yStep = factor > 2. ? 1 : 2;
        }

        int stripCount, stripSize;
        const int PTS_PER_THREAD = 1000;
        stripCount = ((processingRectSize.width/yStep)*(processingRectSize.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
        stripCount = std::min(std::max(stripCount, 1), 100);
        stripSize = (((processingRectSize.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;

        // run single-scale detection at this scale (wishchin, 2017-03-21)
        if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
                                rejectLevels, levelWeights, outputRejectLevels ) )
            break;
    }

    objects.resize(candidates.size());
    std::copy(candidates.begin(), candidates.end(), objects.begin());

    if( outputRejectLevels )
    {
        groupRectangles( objects, rejectLevels, levelWeights, minNeighbors, GROUP_EPS );
    }
    else
    {
        groupRectangles( objects, minNeighbors, GROUP_EPS );
    }
}

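To make the strip partitioning concrete, here is a worked example with illustrative numbers only (a 640x480 frame, a 24x24 training window, factor = 1, hence yStep = 2), following the integer arithmetic in the code above:

//   scaledImageSize    = 640 x 480
//   processingRectSize = (640 - 24) x (480 - 24) = 616 x 456
//   stripCount = ((616/2) * (456 + 1)/2 + 500) / 1000
//              = (308 * 228 + 500) / 1000 = 70        (then clamped to [1, 100])
//   stripSize  = (((456 + 69)/70 + 1)/2) * 2 = 8      rows of scan positions per strip
// Each Range chunk [start, end) handed to the invoker therefore covers rows
// [start*stripSize, min(end*stripSize, 456)) of the scaled image.
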
The above is the OpenCV source code of the objectDetect process. An outer call through the public interface can look like this:

    // eye detection
    m_cascade.detectMultiScale(
        smallImg,
        eyes,
        fAdaBoostScale,            // originally 1.1; 4 is faster
        2,                         // minNeighbors
        //|CV_HAAR_FIND_BIGGEST_OBJECT
        //|CV_HAAR_DO_ROUGH_SEARCH
        CV_HAAR_DO_CANNY_PRUNING,
        Size(48, 32)
    );
    //cout << "eyes size=:" << eyes.size() << endl;

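For completeness, a minimal end-to-end sketch of the public interface exercised by this call stack; the cascade file name, image path, and parameter values are placeholders, and the flags argument only takes effect on old-format (OpenCV 1.x) cascades, as the isOldFormatCascade() branch above shows:

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <vector>

int main()
{
    cv::CascadeClassifier cascade;
    if( !cascade.load("haarcascade_frontalface_alt.xml") )   // placeholder cascade file
        return -1;

    cv::Mat img = cv::imread("test.jpg");                    // placeholder image
    if( img.empty() )
        return -1;

    cv::Mat gray;
    cv::cvtColor( img, gray, CV_BGR2GRAY );
    cv::equalizeHist( gray, gray );                          // commonly done before Haar detection

    std::vector<cv::Rect> objects;
    cascade.detectMultiScale( gray, objects,
                              1.1,                           // scaleFactor
                              3,                             // minNeighbors
                              CV_HAAR_DO_CANNY_PRUNING,      // flags (old-format cascades only)
                              cv::Size(30, 30) );            // minObjectSize

    std::cout << "detections: " << objects.size() << std::endl;
    return 0;
}

After grouping, objects holds the merged rectangles in the coordinates of the input image.
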
Summary:

The above is, roughly, the function call stack of the Haar + AdaBoost detection computation.
