OpenCV Template Matching Subpixel Accuracy
OpenCV provides the function matchTemplate to do template matching easily, but its accuracy only reaches pixel level. To achieve subpixel accuracy, the result needs to be refined with another method.
Here I use cv::findTransformECC. ECC stands for Enhanced Correlation Coefficient; the function uses a Gauss-Newton style iteration to find the warp that maximizes the correlation coefficient.
#include <opencv2/opencv.hpp>

// Refine the integer-pixel match location with cv::findTransformECC (translation-only model).
int _refineSrchTemplate(const cv::Mat &mat, cv::Mat &matTmpl, cv::Point2f &ptResult)
{
    // 2x3 warp matrix, initialized with the translation found by cv::matchTemplate
    cv::Mat matWarp = cv::Mat::eye(2, 3, CV_32FC1);
    matWarp.at<float>(0, 2) = ptResult.x;
    matWarp.at<float>(1, 2) = ptResult.y;

    // Typical stopping criteria; tune for your own images.
    int number_of_iterations = 100;
    double termination_eps = 1e-8;

    cv::findTransformECC(matTmpl, mat, matWarp, cv::MOTION_TRANSLATION,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                         number_of_iterations, termination_eps));

    ptResult.x = matWarp.at<float>(0, 2);
    ptResult.y = matWarp.at<float>(1, 2);
    return 0;
}

int matchTemplate(const cv::Mat &mat, cv::Mat &matTmpl, cv::Point2f &ptResult)
{
    cv::Mat img_display, matResult;
    const int match_method = CV_TM_SQDIFF;
    mat.copyTo(img_display);

    // Create the result matrix
    int result_cols = mat.cols - matTmpl.cols + 1;
    int result_rows = mat.rows - matTmpl.rows + 1;
    matResult.create(result_rows, result_cols, CV_32FC1);

    // Do the matching and normalize
    cv::matchTemplate(mat, matTmpl, matResult, match_method);
    cv::normalize(matResult, matResult, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());

    // Localize the best match with minMaxLoc.
    // For SQDIFF and SQDIFF_NORMED the best match is the minimum; for the other methods it is the maximum.
    double minVal, maxVal;
    cv::Point minLoc, maxLoc, matchLoc;
    cv::minMaxLoc(matResult, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
    if (match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED)
        matchLoc = minLoc;
    else
        matchLoc = maxLoc;

    ptResult.x = (float)matchLoc.x;
    ptResult.y = (float)matchLoc.y;

    _refineSrchTemplate(mat, matTmpl, ptResult);

    // The refined result is the upper-left corner of the template; shift it to the template center.
    // The +0.5 is because the center lies between two pixels: for a template of size 20 the center is at 10.5.
    ptResult.x += (float)(matTmpl.cols / 2 + 0.5);
    ptResult.y += (float)(matTmpl.rows / 2 + 0.5);
    return 0;
}
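A minimal usage sketch, assuming a grayscale search image and template stored on disk (the file names below are placeholders):

#include <iostream>

int main()
{
    // Hypothetical input files: a search image and a smaller template cut from it.
    cv::Mat mat = cv::imread("search.png", cv::IMREAD_GRAYSCALE);
    cv::Mat matTmpl = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
    if (mat.empty() || matTmpl.empty())
        return -1;

    cv::Point2f ptResult;
    matchTemplate(mat, matTmpl, ptResult);    // pixel search + ECC refinement
    std::cout << "Template center at (" << ptResult.x << ", " << ptResult.y << ")" << std::endl;
    return 0;
}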
There is another way to refine the template matching result: minimize the intensity difference between the template and the search image directly. For this I use the Levenberg-Marquardt method to iterate. The method is described in detail in the paper http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf, and pseudo code is given on page 15. I implemented it in C++ with OpenCV; the source code is below.
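For reference, the core of that pseudo code can be summarized as follows (d is the top-left position of the template inside the search image, r the vector of intensity residuals between template and image patch, J its Jacobian, g = Jᵀr):

    solve (JᵀJ + μI) h = −g
    ρ = (‖r(d)‖² − ‖r(d + h)‖²) / (½ hᵀ(μh − g))
    if ρ > 0:  d ← d + h,  μ ← μ · max(1/3, 1 − (2ρ − 1)³),  ν ← 2
    else:      μ ← μ · ν,  ν ← 2ν

The iteration stops when the step h becomes smaller than a tolerance or the iteration limit is reached.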
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// cv::filter2D computes correlation; flip the kernel (and mirror the anchor)
// to get true convolution, like MATLAB's conv2.
void filter2D_Conv(cv::InputArray src, cv::OutputArray dst, int ddepth,
                   cv::InputArray kernel, cv::Point anchor = cv::Point(-1, -1),
                   double delta = 0, int borderType = cv::BORDER_DEFAULT)
{
    cv::Mat newKernel;
    const int FLIP_H_Z = -1;                  // flip around both axes
    cv::flip(kernel, newKernel, FLIP_H_Z);
    cv::Point newAnchor = anchor;
    if (anchor.x >= 0 && anchor.y >= 0)
        newAnchor = cv::Point(newKernel.cols - anchor.x - 1, newKernel.rows - anchor.y - 1);
    cv::filter2D(src, dst, ddepth, newKernel, newAnchor, delta, borderType);
}
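As a quick sanity check, a small sketch (the example kernel and signal are arbitrary): with a 1x3 kernel, cv::filter2D correlates, while filter2D_Conv flips the kernel first and therefore behaves like true convolution at interior pixels.

cv::Mat k   = (cv::Mat_<float>(1, 3) << -1.f, 0.f, 1.f);
cv::Mat src = (cv::Mat_<float>(1, 5) <<  1.f, 2.f, 4.f, 7.f, 11.f);
cv::Mat corr, conv;
cv::filter2D(src, corr, CV_32F, k);   // correlation: corr(x) = src(x+1) - src(x-1)
filter2D_Conv(src, conv, CV_32F, k);  // convolution: conv(x) = src(x-1) - src(x+1)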
float GuassianValue2D(float ssq, float x, float y)
{
    return (float)(std::exp(-(x * x + y * y) / (2.0 * ssq)) / (2.0 * CV_PI * ssq));
}

// Equivalent of MATLAB's meshgrid: matX repeats the x range down the rows,
// matY repeats the y range across the columns.
template<typename _tp>
void meshgrid(float xStart, float xInterval, float xEnd, float yStart, float yInterval, float yEnd, cv::Mat &matX, cv::Mat &matY)
{
    std::vector<_tp> vectorX, vectorY;
    _tp xValue = xStart;
    while (xValue <= xEnd) {
        vectorX.push_back(xValue);
        xValue += xInterval;
    }
    _tp yValue = yStart;
    while (yValue <= yEnd) {
        vectorY.push_back(yValue);
        yValue += yInterval;
    }
    cv::Mat matCol(vectorX);
    matCol = matCol.reshape(1, 1);                        // row vector of x values
    cv::Mat matRow(vectorY);
    matRow = matRow.reshape(1, (int)vectorY.size());      // column vector of y values
    matX = cv::repeat(matCol, (int)vectorY.size(), 1);
    matY = cv::repeat(matRow, 1, (int)vectorX.size());
}
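As a small sketch of what meshgrid produces, assuming the ±2 range used by the refinement below, the call yields two 5x5 matrices: X with columns running -2..2 and Y with rows running -2..2.

cv::Mat X, Y;
meshgrid<float>(-2.f, 1.f, 2.f, -2.f, 1.f, 2.f, X, Y);
std::cout << "X =\n" << X << "\nY =\n" << Y << std::endl;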
int _refineWithLMIteration(const cv::Mat &mat, cv::Mat &matTmpl, cv::Point2f &ptResult)
{
    // Gaussian-derivative kernel parameters; width and ssq are representative values
    // (5x5 kernel, sigma^2 = 1) and can be tuned for your own images.
    int width = 2;
    float ssq = 1.f;
    cv::Mat matGuassian;
    matGuassian.create(width * 2 + 1, width * 2 + 1, CV_32FC1);

    cv::Mat matI, matT;
    mat.convertTo(matI, CV_32FC1);
    matTmpl.convertTo(matT, CV_32FC1);

    cv::Mat matX, matY;
    meshgrid<float>(-width, 1, width, -width, 1, width, matX, matY);
    for (int row = 0; row < matX.rows; ++row)
        for (int col = 0; col < matX.cols; ++col)
            matGuassian.at<float>(row, col) = GuassianValue2D(ssq, matX.at<float>(row, col), matY.at<float>(row, col));

    // Multiply by -x to get a Gaussian-derivative kernel, then normalize by the sum of its positive half.
    matGuassian = matGuassian.mul(-matX);
    cv::Mat matTmp(matGuassian, cv::Range::all(), cv::Range(0, width));
    float fSum = cv::sum(matTmp)[0];
    cv::Mat matGuassianKernalX, matGuassianKernalY;
    matGuassianKernalX = matGuassian / fSum;        // XSG question: is the kernel reversed?
    cv::transpose(matGuassianKernalX, matGuassianKernalY);

    /**************** LM iteration ****************/
    int N = 0, v = 2;
    cv::Mat matD;
    matD.create(2, 1, CV_32FC1);
    matD.at<float>(0, 0) = ptResult.x;
    matD.at<float>(1, 0) = ptResult.y;
    cv::Mat matDr = matD.clone();
    cv::Mat matInputNew;

    // Bilinear interpolation of the search image at the (subpixel) template position matD.
    auto interp2 = [matI, matT](cv::Mat &matOutput, const cv::Mat &matD) {
        cv::Mat map_x, map_y;
        map_x.create(matT.size(), CV_32FC1);
        map_y.create(matT.size(), CV_32FC1);
        cv::Point2f ptStart(matD.at<float>(0, 0), matD.at<float>(1, 0));
        for (int row = 0; row < matT.rows; ++row)
            for (int col = 0; col < matT.cols; ++col) {
                map_x.at<float>(row, col) = ptStart.x + col;
                map_y.at<float>(row, col) = ptStart.y + row;
            }
        cv::remap(matI, matOutput, map_x, map_y, cv::INTER_LINEAR);
    };

    interp2(matInputNew, matD);

    cv::Mat matR = matT - matInputNew;    // residual image
    cv::Mat matRn = matR.clone();
    float fRSum = cv::sum(matR.mul(matR))[0];
    float fRSumN = fRSum;

    cv::Mat matDerivativeX, matDerivativeY;
    filter2D_Conv(matInputNew, matDerivativeX, CV_32F, matGuassianKernalX, cv::Point(-1, -1), 0.0, cv::BORDER_REPLICATE);
    filter2D_Conv(matInputNew, matDerivativeY, CV_32F, matGuassianKernalY, cv::Point(-1, -1), 0.0, cv::BORDER_REPLICATE);

    cv::Mat matRt = matR.reshape(1, 1);
    cv::Mat matRtTranspose;
    cv::transpose(matRt, matRtTranspose);
    matDerivativeX = matDerivativeX.reshape(1, 1);
    matDerivativeY = matDerivativeY.reshape(1, 1);

    const float *p = matDerivativeX.ptr<float>();
    std::vector<float> vecDerivativeX(p, p + matDerivativeX.cols);

    cv::Mat matJacobianT, matJacobian;
    matJacobianT.push_back(matDerivativeX);
    matJacobianT.push_back(matDerivativeY);
    cv::transpose(matJacobianT, matJacobian);

    cv::Mat matE = cv::Mat::eye(2, 2, CV_32FC1);
    cv::Mat A = matJacobianT * matJacobian;
    cv::Mat g = -matJacobianT * matRtTranspose;

    double minValue, maxValue;
    cv::minMaxLoc(A, &minValue, &maxValue);
    float mu = 1.f * (float)maxValue;
    float err1 = 1e-4f, err2 = 1e-4f;   // stopping tolerances (typical values)
    int Nmax = 100;                     // maximum number of iterations (typical value)
    while (cv::norm(matDr) > err2 && N < Nmax) {
        ++N;
        cv::solve(A + mu * matE, -g, matDr);    // equivalent to Matlab: matDr = (A + mu*E) \ (-g)
        cv::Mat matDn = matD + matDr;
        if (cv::norm(matDr) < err2) {
            interp2(matInputNew, matDn);
            matRn = matT - matInputNew;
            fRSumN = cv::sum(matRn.mul(matRn))[0];
            matD = matDn;
            break;
        } else {
            // Reject steps that would move the template outside the search image.
            if (matDn.at<float>(0, 0) > matI.cols - matT.cols ||
                matDn.at<float>(0, 0) < 0 ||
                matDn.at<float>(1, 0) > matI.rows - matT.rows ||
                matDn.at<float>(1, 0) < 0) {
                mu *= v;
                v *= 2;
            } else {
                interp2(matInputNew, matDn);
                matRn = matT - matInputNew;
                fRSumN = cv::sum(matRn.mul(matRn))[0];

                cv::Mat matDrTranspose;
                cv::transpose(matDr, matDrTranspose);
                cv::Mat matL = matDrTranspose * (mu * matDr - g);   // L(0) - L(hlm) = 0.5 * h' * (mu*h - g)
                auto L = matL.at<float>(0, 0);
                auto F = fRSum - fRSumN;
                float rho = F / L;
                if (rho > 0) {
                    // Accept the step and rebuild the Jacobian at the new position.
                    matD = matDn.clone();
                    matR = matRn.clone();
                    fRSum = fRSumN;
                    filter2D_Conv(matInputNew, matDerivativeX, CV_32F, matGuassianKernalX, cv::Point(-1, -1), 0.0, cv::BORDER_REPLICATE);
                    filter2D_Conv(matInputNew, matDerivativeY, CV_32F, matGuassianKernalY, cv::Point(-1, -1), 0.0, cv::BORDER_REPLICATE);
                    matRt = matR.reshape(1, 1);
                    cv::transpose(matRt, matRtTranspose);
                    matDerivativeX = matDerivativeX.reshape(1, 1);
                    matDerivativeY = matDerivativeY.reshape(1, 1);
                    matJacobianT.release();
                    matJacobianT.push_back(matDerivativeX);
                    matJacobianT.push_back(matDerivativeY);
                    cv::transpose(matJacobianT, matJacobian);
                    A = matJacobianT * matJacobian;
                    g = -matJacobianT * matRtTranspose;
                    mu *= (float)std::max(1.0 / 3.0, 1.0 - std::pow(2.0 * rho - 1.0, 3.0));
                } else {
                    mu *= v;
                    v *= 2;
                }
            }
        }
    }

    ptResult.x = matD.at<float>(0, 0);
    ptResult.y = matD.at<float>(1, 0);
    return 0;
}

int matchTemplate(const cv::Mat &mat, cv::Mat &matTmpl, cv::Point2f &ptResult)
{
    cv::Mat img_display, matResult;
    const int match_method = CV_TM_SQDIFF;
    mat.copyTo(img_display);

    // Create the result matrix
    int result_cols = mat.cols - matTmpl.cols + 1;
    int result_rows = mat.rows - matTmpl.rows + 1;
    matResult.create(result_rows, result_cols, CV_32FC1);

    // Do the matching and normalize
    cv::matchTemplate(mat, matTmpl, matResult, match_method);
    cv::normalize(matResult, matResult, 0, 1, cv::NORM_MINMAX, -1, cv::Mat());

    // Localize the best match with minMaxLoc.
    // For SQDIFF and SQDIFF_NORMED the best match is the minimum; for the other methods it is the maximum.
    double minVal, maxVal;
    cv::Point minLoc, maxLoc, matchLoc;
    cv::minMaxLoc(matResult, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
    if (match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED)
        matchLoc = minLoc;
    else
        matchLoc = maxLoc;

    ptResult.x = (float)matchLoc.x;
    ptResult.y = (float)matchLoc.y;

    _refineWithLMIteration(mat, matTmpl, ptResult);

    // Shift the refined upper-left corner to the template center.
    ptResult.x += (float)(matTmpl.cols / 2 + 0.5);
    ptResult.y += (float)(matTmpl.rows / 2 + 0.5);
    return 0;
}