#include <fstream>
#include <sstream>
#include <iostream>
#include <string.h>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;
using namespace dnn;
using namespace std;

// Initialize the parameters
float confThreshold = 0.5; // Confidence threshold
float maskThreshold = 0.3; // Mask threshold

vector<string> classes;
vector<Scalar> colors;

// Draw the predicted bounding box
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask);

// Postprocess the neural network's output for each frame
void postprocess(Mat& frame, const vector<Mat>& outs);

int main()
{
// Load names of classes
string classesFile = "./mask_rcnn_inception_v2_coco_2018_01_28/mscoco_labels.names";
ifstream ifs(classesFile.c_str());
string line;
while (getline(ifs, line)) classes.push_back(line);

// Load the colors
string colorsFile = "./mask_rcnn_inception_v2_coco_2018_01_28/colors.txt";
ifstream colorFptr(colorsFile.c_str());
while (getline(colorFptr, line))
{
char* pEnd;
double r, g, b;
r = strtod(line.c_str(), &pEnd);
g = strtod(pEnd, &pEnd);
b = strtod(pEnd, NULL);
colors.push_back(Scalar(r, g, b, 255.0));
}

// Give the configuration and weight files for the model
String textGraph = "./mask_rcnn_inception_v2_coco_2018_01_28/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt";
String modelWeights = "./mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb";

// Load the network
Net net = readNetFromTensorflow(modelWeights, textGraph);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);

// Open a video file or an image file or a camera stream.
string str, outputFile;
VideoCapture cap(0); // Change the index to match the camera's port id
//VideoWriter video;
Mat frame, blob;

// Create a window
static const string kWinName = "Deep learning object detection in OpenCV";
namedWindow(kWinName, WINDOW_NORMAL);

// Process frames.
while (waitKey(1) < 0)
{
// get frame from the video
cap >> frame;

// Stop the program if we reached the end of the video
if (frame.empty())
{
cout << "Done processing !!!" << endl;
cout << "Output file is stored as " << outputFile << endl;
waitKey();
break;
}
// Create a 4D blob from a frame.
blobFromImage(frame, blob, 1.0, Size(frame.cols, frame.rows), Scalar(), true, false);
//blobFromImage(frame, blob);

// Sets the input to the network
net.setInput(blob);

// Runs the forward pass to get output from the output layers
std::vector<String> outNames(2);
outNames[0] = "detection_out_final";
outNames[1] = "detection_masks";
vector<Mat> outs;
net.forward(outs, outNames);

// Extract the bounding box and mask for each of the detected objects
postprocess(frame, outs);

// Put efficiency information. The function getPerfProfile returns the overall time for inference (t)
// and the timings for each of the layers (in layersTimes).
vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
string label = format("Mask-RCNN on 2.5 GHz Intel Core i7 CPU, Inference time for a frame : %0.0f ms", t);
putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));

// Write the frame with the detection boxes
Mat detectedFrame;
frame.convertTo(detectedFrame, CV_8U);

imshow(kWinName, frame);
}
cap.release();
return 0;
}

// For each frame, extract the bounding box and mask for each detected object
void postprocess(Mat& frame, const vector<Mat>& outs)
{
Mat outDetections = outs[0];
Mat outMasks = outs[1];

// Output size of masks is NxCxHxW where
// N - number of detected boxes
// C - number of classes (excluding background)
// HxW - segmentation shape
const int numDetections = outDetections.size[2];
const int numClasses = outMasks.size[1];

outDetections = outDetections.reshape(1, outDetections.total() / 7);
for (int i = 0; i < numDetections; ++i)
{
float score = outDetections.at<float>(i, 2);
if (score > confThreshold)
{
// Extract the bounding box
int classId = static_cast<int>(outDetections.at<float>(i, 1));
int left = static_cast<int>(frame.cols * outDetections.at<float>(i, 3));
int top = static_cast<int>(frame.rows * outDetections.at<float>(i, 4));
int right = static_cast<int>(frame.cols * outDetections.at<float>(i, 5));
int bottom = static_cast<int>(frame.rows * outDetections.at<float>(i, 6));

left = max(0, min(left, frame.cols - 1));
top = max(0, min(top, frame.rows - 1));
right = max(0, min(right, frame.cols - 1));
bottom = max(0, min(bottom, frame.rows - 1));
Rect box = Rect(left, top, right - left + 1, bottom - top + 1);

// Extract the mask for the object
Mat objectMask(outMasks.size[2], outMasks.size[3], CV_32F, outMasks.ptr<float>(i, classId));

// Draw bounding box, colorize and show the mask on the image
drawBox(frame, classId, score, box, objectMask);
}
}
}

// Draw the predicted bounding box, colorize and show the mask on the image
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask)
{
//Draw a rectangle displaying the bounding box
rectangle(frame, Point(box.x, box.y), Point(box.x + box.width, box.y + box.height), Scalar(255, 178, 50), 3);

// Get the label for the class name and its confidence
string label = format("%.2f", conf);
if (!classes.empty())
{
CV_Assert(classId < (int)classes.size());
label = classes[classId] + ":" + label;
}

// Display the label at the top of the bounding box
int baseLine;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
box.y = max(box.y, labelSize.height);
rectangle(frame, Point(box.x, box.y - round(1.5*labelSize.height)), Point(box.x + round(1.5*labelSize.width), box.y + baseLine), Scalar(255, 255, 255), FILLED);
putText(frame, label, Point(box.x, box.y), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);

Scalar color = colors[classId % colors.size()];

// Resize the mask, threshold, color and apply it on the image
resize(objectMask, objectMask, Size(box.width, box.height));
Mat mask = (objectMask > maskThreshold);
Mat coloredRoi = (0.3 * color + 0.7 * frame(box));
coloredRoi.convertTo(coloredRoi, CV_8UC3);

// Draw the contours on the image
vector<Mat> contours;
Mat hierarchy;
mask.convertTo(mask, CV_8U);
findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
coloredRoi.copyTo(frame(box), mask);
}
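
The loop above converts each processed frame to detectedFrame and prints the name of an output file, but the VideoWriter declaration is commented out, so nothing is ever written to disk. Below is a minimal sketch of how the writer could be wired in, assuming an AVI container, the MJPG codec and a fixed frame rate (the file name, codec and frame rate are assumptions, not part of the original sample):

// Sketch: open the writer once, right after the VideoCapture is created.
VideoWriter video;
outputFile = "mask_rcnn_out.avi";
video.open(outputFile, VideoWriter::fourcc('M', 'J', 'P', 'G'), 28,
           Size((int)cap.get(CAP_PROP_FRAME_WIDTH), (int)cap.get(CAP_PROP_FRAME_HEIGHT)));

// Inside the processing loop, after frame.convertTo(detectedFrame, CV_8U):
video.write(detectedFrame);

// After the loop, next to cap.release():
video.release();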

https://github.com/spmallick/learnopencv/tree/master/Mask-RCNN

https://www.learnopencv.com/deep-learning-based-object-detection-and-instance-segmentation-using-mask-r-cnn-in-opencv-python-c/


Commonly used pre-trained models can be downloaded from the TensorFlow detection model zoo: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
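
The model zoo tarball provides frozen_inference_graph.pb; the program above also expects mscoco_labels.names, colors.txt and the text graph mask_rcnn_inception_v2_coco_2018_01_28.pbtxt in the same ./mask_rcnn_inception_v2_coco_2018_01_28/ directory (these accompany the LearnOpenCV sample linked above, and the text graph can also be generated with OpenCV's tf_text_graph_mask_rcnn.py sample script). If any of the files are missing, readNetFromTensorflow throws a cv::Exception; a small guard like the sketch below, which is my addition rather than part of the original sample, makes that failure explicit:

// Sketch: drop-in replacement for the readNetFromTensorflow call in main(),
// failing early with a readable message if the model files cannot be loaded.
Net net;
try
{
    net = readNetFromTensorflow(modelWeights, textGraph);
}
catch (const cv::Exception& e)
{
    cerr << "Failed to load Mask R-CNN model: " << e.what() << endl;
    return 1;
}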
