Classifying OCR Samples with OpenCV3 Machine Learning Methods: SVM, ANN, AdaBoost, KNN, Random Forest
Reposted from http://www.cnblogs.com/denny402/p/5032839.html
The ml classes in OpenCV3 differ from those in OpenCV2; the program below walks through OpenCV3's machine-learning classes with one example per method.
Its job is to classify OpenCV's bundled OCR sample set (the letter-recognition data). In these experiments the neural network and AdaBoost are very slow to train, and KNN still gives the best results.
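Since the point of the post is that the ml module's interface changed between OpenCV2 and OpenCV3, here is a minimal self-contained sketch of the new calling convention on toy data, separate from the main listing below (the commented CvSVM line recalls the old 2.x style; the toy samples and the name query are just for illustration):

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;
using namespace cv::ml;

int main()
{
    // Four 2-D points, two classes: toy data just to show the 3.x API shape.
    float samples[4][2] = { { 0, 0 }, { 1, 1 }, { 10, 10 }, { 11, 11 } };
    int labels[4] = { 0, 0, 1, 1 };
    Mat train(4, 2, CV_32F, samples), resp(4, 1, CV_32S, labels);

    // OpenCV2 style: CvSVM svm; svm.train(train, resp, Mat(), Mat(), params);
    // OpenCV3 style: create via a factory, configure with setters, train on TrainData.
    Ptr<SVM> svm = SVM::create();
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::LINEAR);
    svm->train(TrainData::create(train, ROW_SAMPLE, resp));

    Mat query = (Mat_<float>(1, 2) << 10.5f, 10.5f);
    printf("predicted class: %.0f\n", svm->predict(query)); // expect 1
    return 0;
}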
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>   // FILE, fgets, sscanf_s (opencv.hpp may already pull these in)
#include <cstring>  // strchr, memcpy
#include <cfloat>   // FLT_EPSILON, DBL_MAX
using namespace std;
using namespace cv;
using namespace cv::ml;

// Read the sample data from file
bool read_num_class_data(const string& filename, int var_count, Mat* _data, Mat* _responses)
{
    const int M = 1024;          // maximum line length
    char buf[M + 2];
    Mat el_ptr(1, var_count, CV_32F);
    int i;
    vector<int> responses;

    _data->release();
    _responses->release();

    FILE *f;
    fopen_s(&f, filename.c_str(), "rt");  // MSVC-secure variant; use fopen on other platforms
    if (!f)
    {
        cout << "Could not read the database " << filename << endl;
        return false;
    }

    for (;;)
    {
        char* ptr;
        if (!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        responses.push_back((int)buf[0]);  // first character is the class letter
        ptr = buf + 2;                     // skip the letter and its comma
        for (i = 0; i < var_count; i++)
        {
            int n = 0;
            sscanf_s(ptr, "%f%n", &el_ptr.at<float>(i), &n);
            ptr += n + 1;
        }
        if (i < var_count)
            break;
        _data->push_back(el_ptr);
    }
    fclose(f);
    Mat(responses).copyTo(*_responses);
    return true;
}
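For reference, each record in letter-recognition.data (the UCI letter dataset bundled with OpenCV) is a class letter followed by 16 integer features, which is why the reader above skips two characters and then parses var_count = 16 floats. A typical line looks like:

T,2,8,3,5,1,8,13,0,6,6,10,8,0,8,0,8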
// Prepare the training data
Ptr<TrainData> prepare_train_data(const Mat& data, const Mat& responses, int ntrain_samples)
{
    // mark the first ntrain_samples rows as the training subset
    Mat sample_idx = Mat::zeros(1, data.rows, CV_8U);
    Mat train_samples = sample_idx.colRange(0, ntrain_samples);
    train_samples.setTo(Scalar::all(1));

    // all variables are ordered except the categorical response
    int nvars = data.cols;
    Mat var_type(nvars + 1, 1, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(nvars) = VAR_CATEGORICAL;

    return TrainData::create(data, ROW_SAMPLE, responses,
        noArray(), sample_idx, noArray(), var_type);
}
// Set the iteration termination criteria
inline TermCriteria TC(int iters, double eps)
{
    return TermCriteria(TermCriteria::MAX_ITER + (eps > 0 ? TermCriteria::EPS : 0), iters, eps);
}
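As a usage example, TC(100, 0.01) yields criteria that stop training after 100 iterations or once the per-iteration change drops below 0.01, whichever comes first, while TC(max_iter, 0) disables the epsilon test and caps iterations only.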
// Classification prediction: measure recognition rate on the train and test splits
void test_and_save_classifier(const Ptr<StatModel>& model, const Mat& data, const Mat& responses,
    int ntrain_samples, int rdelta)
{
    int i, nsamples_all = data.rows;
    double train_hr = 0, test_hr = 0;

    // compute prediction error on train and test data
    for (i = 0; i < nsamples_all; i++)
    {
        Mat sample = data.row(i);
        float r = model->predict(sample);
        // rdelta shifts class-index predictions back to character codes where needed
        r = std::abs(r + rdelta - responses.at<int>(i)) <= FLT_EPSILON ? 1.f : 0.f;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
}
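Despite the "save" in its name, this trimmed version only measures accuracy; the original OpenCV letter_recog.cpp sample also accepted an output filename and serialized the model. If you want persistence back, a minimal sketch (the filename is a placeholder; StatModel::load<T> is the OpenCV 3.x template, and later 3.x releases also offer per-class load functions):

model->save("letter_model.xml");                            // write the trained model to disk
Ptr<SVM> loaded = StatModel::load<SVM>("letter_model.xml"); // reload, matching the concrete type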
// Random trees (random forest) classifier
bool build_rtrees_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<RTrees> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = RTrees::create();
    model->setMaxDepth(10);
    model->setMinSampleCount(10);
    model->setRegressionAccuracy(0);
    model->setUseSurrogates(false);
    model->setMaxCategories(15);
    model->setPriors(Mat());
    model->setCalculateVarImportance(true);
    model->setActiveVarCount(4);
    model->setTermCriteria(TC(100, 0.01f));
    model->train(tdata);
    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    cout << "Number of trees: " << model->getRoots().size() << endl;

    // Print variable importance
    Mat var_importance = model->getVarImportance();
    if (!var_importance.empty())
    {
        double rt_imp_sum = sum(var_importance)[0];
        printf("var#\timportance (in %%):\n");
        int i, n = (int)var_importance.total();
        for (i = 0; i < n; i++)
            printf("%-2d\t%-4.1f\n", i, 100.f*var_importance.at<float>(i) / rt_imp_sum);
    }
    return true;
}
// AdaBoost classifier
bool build_boost_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;
    Mat weak_responses;

    read_num_class_data(data_filename, 16, &data, &responses);
    int i, j, k;
    Ptr<Boost> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.5);
    int var_count = data.cols;

    // Unroll the 26-class problem into a single binary one: replicate each sample
    // once per candidate class, append the class index as an extra feature, and
    // set the response to 1 iff that candidate class is the correct one.
    Mat new_data(ntrain_samples*class_count, var_count + 1, CV_32F);
    Mat new_responses(ntrain_samples*class_count, 1, CV_32S);
    for (i = 0; i < ntrain_samples; i++)
    {
        const float* data_row = data.ptr<float>(i);
        for (j = 0; j < class_count; j++)
        {
            float* new_data_row = (float*)new_data.ptr<float>(i*class_count + j);
            memcpy(new_data_row, data_row, var_count * sizeof(data_row[0]));
            new_data_row[var_count] = (float)j;
            new_responses.at<int>(i*class_count + j) = responses.at<int>(i) == j + 'A';
        }
    }

    // both the appended class-index feature and the response are categorical
    Mat var_type(1, var_count + 2, CV_8U);
    var_type.setTo(Scalar::all(VAR_ORDERED));
    var_type.at<uchar>(var_count) = var_type.at<uchar>(var_count + 1) = VAR_CATEGORICAL;

    Ptr<TrainData> tdata = TrainData::create(new_data, ROW_SAMPLE, new_responses,
        noArray(), noArray(), noArray(), var_type);

    // compensate for the 25:1 imbalance between negative and positive unrolled samples
    vector<double> priors(2);
    priors[0] = 1;
    priors[1] = 26;

    model = Boost::create();
    model->setBoostType(Boost::GENTLE);
    model->setWeakCount(100);
    model->setWeightTrimRate(0.95);
    model->setMaxDepth(5);
    model->setUseSurrogates(false);
    model->setPriors(Mat(priors));
    model->train(tdata);

    Mat temp_sample(1, var_count + 1, CV_32F);
    float* tptr = temp_sample.ptr<float>();

    // compute prediction error on train and test data
    double train_hr = 0, test_hr = 0;
    for (i = 0; i < nsamples_all; i++)
    {
        int best_class = 0;
        double max_sum = -DBL_MAX;
        const float* ptr = data.ptr<float>(i);
        for (k = 0; k < var_count; k++)
            tptr[k] = ptr[k];

        // try every candidate class and keep the one with the largest raw score
        for (j = 0; j < class_count; j++)
        {
            tptr[var_count] = (float)j;
            float s = model->predict(temp_sample, noArray(), StatModel::RAW_OUTPUT);
            if (max_sum < s)
            {
                max_sum = s;
                best_class = j + 'A';
            }
        }

        double r = std::abs(best_class - responses.at<int>(i)) < FLT_EPSILON ? 1 : 0;
        if (i < ntrain_samples)
            train_hr += r;
        else
            test_hr += r;
    }

    test_hr /= nsamples_all - ntrain_samples;
    train_hr = ntrain_samples > 0 ? train_hr / ntrain_samples : 1.;
    printf("Recognition rate: train = %.1f%%, test = %.1f%%\n",
        train_hr*100., test_hr*100.);
    cout << "Number of trees: " << model->getRoots().size() << endl;
    return true;
}
// Multi-layer perceptron (ANN) classifier
bool build_mlp_classifier(const string& data_filename)
{
    const int class_count = 26;
    Mat data;
    Mat responses;

    read_num_class_data(data_filename, 16, &data, &responses);
    Ptr<ANN_MLP> model;

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);
    Mat train_data = data.rowRange(0, ntrain_samples);
    Mat train_responses = Mat::zeros(ntrain_samples, class_count, CV_32F);

    // 1. unroll the responses into one-hot rows
    cout << "Unrolling the responses...\n";
    for (int i = 0; i < ntrain_samples; i++)
    {
        int cls_label = responses.at<int>(i) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }

    // 2. train classifier
    int layer_sz[] = { data.cols, 100, 100, class_count };
    int nlayers = (int)(sizeof(layer_sz) / sizeof(layer_sz[0]));
    Mat layer_sizes(1, nlayers, CV_32S, layer_sz);

#if 1
    int method = ANN_MLP::BACKPROP;
    double method_param = 0.001;
    int max_iter = 300;
#else
    int method = ANN_MLP::RPROP;
    double method_param = 0.1;
    int max_iter = 1000;
#endif

    Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
    model = ANN_MLP::create();
    model->setLayerSizes(layer_sizes);
    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    model->setTermCriteria(TC(max_iter, 0));
    model->setTrainMethod(method, method_param);
    model->train(tdata);
    return true;
}
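Note that this version stops after training and never scores the MLP. The original OpenCV sample evaluates it with the same helper, passing rdelta = 'A' because ANN_MLP::predict returns the zero-based index of the strongest output neuron rather than a character code; a sketch of that call:

test_and_save_classifier(model, data, responses, ntrain_samples, 'A');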
// K-nearest-neighbor classifier
bool build_knearest_classifier(const string& data_filename, int K)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);
    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    Ptr<KNearest> model = KNearest::create();
    model->setDefaultK(K);
    model->setIsClassifier(true);
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
// Normal Bayes classifier
bool build_nbayes_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<NormalBayesClassifier> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = NormalBayesClassifier::create();
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
// SVM classifier
bool build_svm_classifier(const string& data_filename)
{
    Mat data;
    Mat responses;
    read_num_class_data(data_filename, 16, &data, &responses);

    int nsamples_all = data.rows;
    int ntrain_samples = (int)(nsamples_all*0.8);

    Ptr<SVM> model;
    Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
    model = SVM::create();
    model->setType(SVM::C_SVC);
    model->setKernel(SVM::LINEAR);
    model->setC(1);
    model->train(tdata);

    test_and_save_classifier(model, data, responses, ntrain_samples, 0);
    return true;
}
int main()
{
    // letter dataset that ships with the OpenCV sources
    string data_filename = "D:\\Program Files\\opencv\\sources\\samples\\data\\letter-recognition.data";

    cout << "SVM classification:" << endl;
    build_svm_classifier(data_filename);
    cout << "Normal Bayes classification:" << endl;
    build_nbayes_classifier(data_filename);
    cout << "K-nearest-neighbor classification:" << endl;
    build_knearest_classifier(data_filename, 10);
    cout << "Random trees classification:" << endl;
    build_rtrees_classifier(data_filename);
    cout << "AdaBoost classification:" << endl;
    build_boost_classifier(data_filename);
    cout << "ANN (multi-layer perceptron) classification:" << endl;
    build_mlp_classifier(data_filename);
    system("pause");
    return 0;
}
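One practical note: the data path above is hardcoded for a particular Windows OpenCV install; point data_filename at samples/data/letter-recognition.data inside your own OpenCV source tree before running.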