Installation

  • KinectSDK-v1.8-Setup.exe
  • OpenNI-Windows-x86-2.1.0.msi

Qt project

Copy the contents of the Redist folder into the directory that contains the compiled .exe.
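
For reference, here is a minimal qmake .pro sketch for such a project. The variable names, install paths and OpenCV version suffix (249) below are placeholders chosen for illustration, not values from the original post; adjust them to your own OpenNI2 and OpenCV installation.

# kinectRecorder.pro -- sketch only; all paths and version suffixes are assumptions
TEMPLATE = app
CONFIG  += console
CONFIG  -= qt                     # the recorder itself only needs OpenNI2 + OpenCV

SOURCES += main.cpp

# OpenNI2 (placeholder install path)
OPENNI2_DIR  = C:/OpenNI2
INCLUDEPATH += $$OPENNI2_DIR/Include
LIBS        += -L$$OPENNI2_DIR/Lib -lOpenNI2

# OpenCV 2.x (placeholder install path and version suffix)
OPENCV_DIR   = C:/opencv/build
INCLUDEPATH += $$OPENCV_DIR/include
LIBS        += -L$$OPENCV_DIR/x86/vc10/lib \
               -lopencv_core249 -lopencv_highgui249 -lopencv_imgproc249

After building, copy the contents of the OpenNI2 Redist folder (typically OpenNI2.dll plus the OpenNI2/Drivers subfolder) and the matching opencv_*.dll files next to the generated .exe, as noted above.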

#include <stdlib.h>
#include <iostream>
#include <conio.h>
#include <string>
#include "OpenNI.h"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp" #define RESOLUTION 640,480
#define RECORDRESOLUTION 590,440            // frame size of the cropped videos written to disk
#define ADRESOLUTION 45,40,590,440          // crop rectangle: x, y, width, height
#define FPS 20
#define GRAYTH 10                           // 8-bit depth values <= GRAYTH are treated as holes
#define REPAIRERANGE 5                      // base neighborhood radius used when repairing holes
#define COLORTH 10                          // max gray-level difference for "similar" pixels

using namespace std;
using namespace cv;
using namespace openni;

//OpenNI status
Status result = STATUS_OK;
//open device
Device device;
//OpenNI2 image
VideoFrameRef oniDepthImg;
VideoFrameRef oniColorImg;
//create stream
VideoStream oniDepthStream;
VideoStream oniColorStream;
//set video mode
VideoMode modeDepth;
VideoMode modeColor;
//OpenCV image
cv::Mat cvDepthImg;
cv::Mat cvDepthImg2;
cv::Mat cvColorImg;
cv::Mat cvColorImg2;
//OpenCV adjusted image
cv::Mat cvAdjustDepthImg;
cv::Mat cvAdjustColorImg;
//Resolution
Size se=Size(RESOLUTION);
Size recordse=Size(RECORDRESOLUTION);

void CheckOpenNIError( Status result, string status )
{
if( result != STATUS_OK )
cerr << status << " Error: " << OpenNI::getExtendedError() << endl;
}
void iniOpenNI(void );
void releaseResource(void );
void RemoveNoiseRecord(void );
void RemoveNoise(void );
bool pixelRepaire(int ,int ,int );
bool rangeRepaire(int ,int ,int );

int main( int argc, char** argv )
{
iniOpenNI();
RemoveNoiseRecord();
releaseResource();
return 0;
}

void iniOpenNI()
{
// initialize OpenNI2
result = OpenNI::initialize();
CheckOpenNIError( result, "initialize context" );

//open video device
result = device.open( openni::ANY_DEVICE );
CheckOpenNIError( result, "open device" );

//create depth stream
result = oniDepthStream.create( device, openni::SENSOR_DEPTH );
CheckOpenNIError( result, "create depth stream" );
//set depth mode
modeDepth.setResolution( RESOLUTION );
modeDepth.setFps( FPS );
modeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );
oniDepthStream.setVideoMode(modeDepth);
// start depth stream
result = oniDepthStream.start();
CheckOpenNIError( result, "start depth stream" );

// create color stream
result = oniColorStream.create( device, openni::SENSOR_COLOR );
CheckOpenNIError( result, "create color stream" );
// set color video mode
modeColor.setResolution( RESOLUTION );
modeColor.setFps( FPS );
modeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
oniColorStream.setVideoMode( modeColor);
// start color stream
result = oniColorStream.start();
CheckOpenNIError( result, "start color stream" );

// set depth and color image registration mode
if( device.isImageRegistrationModeSupported(IMAGE_REGISTRATION_DEPTH_TO_COLOR ) )
{
cout << "support" << endl;
device.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );
}
}

void releaseResource()
{
//OpenNI2 destroy
oniDepthStream.destroy();
oniColorStream.destroy();
device.close();
OpenNI::shutdown();
}

// Capture loop: records the original and the noise-removed color/depth streams
// to AVI files and shows them in preview windows until ESC is pressed.
void RemoveNoiseRecord()
{
std::cout<<"\t\t\t------RemoveNoiseRecord-------"<<std::endl; char *DepthFilename = "oringindepthvideo.avi";
char *ColorFilename = "oringincolorvideo.avi";
char *removeDepthFilename = "removedepthvideo.avi";
char *removeColorFilename = "removecolorvideo.avi"; VideoWriter removecolorVideoWriter=VideoWriter(removeColorFilename,CV_FOURCC('X','V','I','D'),FPS,recordse);
VideoWriter removedepthVideoWriter=VideoWriter(removeDepthFilename,CV_FOURCC('X','V','I','D'),FPS,recordse);
VideoWriter colorVideoWriter=VideoWriter(ColorFilename,CV_FOURCC('X','V','I','D'),FPS,recordse);
VideoWriter depthVideoWriter=VideoWriter(DepthFilename,CV_FOURCC('X','V','I','D'),FPS,recordse);
// VideoWriter colorVideoWriter=VideoWriter(ColorFilename,CV_FOURCC('X','V','I','D'),FPS,se);
// VideoWriter depthVideoWriter=VideoWriter(DepthFilename,CV_FOURCC('X','V','I','D'),FPS,se);
namedWindow("after-gray",1);
namedWindow("after-depth",1);
namedWindow("befor-color",1);
namedWindow("befor-depth",1);
while(true)
{
if( oniColorStream.readFrame( &oniColorImg ) == STATUS_OK )
{
// convert data into OpenCV type
cv::Mat cvRGBImg( oniColorImg.getHeight(), oniColorImg.getWidth(), CV_8UC3, (void*)oniColorImg.getData() );
cv::cvtColor( cvRGBImg, cvColorImg2, CV_RGB2BGR );
cvColorImg2=Mat(cvColorImg2,Rect(ADRESOLUTION));
colorVideoWriter.write(cvColorImg2);
cv::imshow("befor-color", cvColorImg2 );
cvtColor(cvRGBImg,cvColorImg,CV_RGB2GRAY);
//colorVideoWriter.write(cvColorImg);
}
if( oniDepthStream.readFrame( &oniDepthImg ) == STATUS_OK )
{
cv::Mat cvRawImg16U( oniDepthImg.getHeight(), oniDepthImg.getWidth(), CV_16UC1, (void*)oniDepthImg.getData() );
cvRawImg16U.convertTo( cvDepthImg, CV_8UC1, 255.0/(oniDepthStream.getMaxPixelValue()));
cv::cvtColor(cvDepthImg,cvDepthImg2,CV_GRAY2BGR);
cvDepthImg2=Mat(cvDepthImg2,Rect(ADRESOLUTION));
depthVideoWriter.write(cvDepthImg2);
cv::imshow( "befor-depth", cvDepthImg2 );
}

cvAdjustDepthImg=Mat(cvDepthImg,Rect(ADRESOLUTION));
cvAdjustColorImg=Mat(cvColorImg,Rect(ADRESOLUTION));

RemoveNoise();

cvtColor(cvAdjustColorImg,cvAdjustColorImg,CV_GRAY2BGR);
cvtColor(cvAdjustDepthImg,cvAdjustDepthImg,CV_GRAY2BGR);

removecolorVideoWriter.write(cvAdjustColorImg);
removedepthVideoWriter.write(cvAdjustDepthImg);
std::cout<<"removecolorVideoWriter"<<std::endl;
std::cout<<"\tremovedepthVideoWriter"<<std::endl; imshow("after-gray",cvAdjustColorImg);
imshow("after-depth",cvAdjustDepthImg); int key;
key=waitKey(10);
if(key==27)
{
break;
}
}
removecolorVideoWriter.release ();
removedepthVideoWriter.release ();
colorVideoWriter.release ();
depthVideoWriter.release ();

destroyWindow("after-gray");
destroyWindow("after-depth");
destroyWindow("befor-color");
destroyWindow("befor-depth");
}

// Fill holes in the cropped depth image (pixels <= GRAYTH) using nearby valid
// depth values, guided by gray-level similarity in the registered color image.
void RemoveNoise()
{
clock_t start,finish;
double totaltime=0.0;
start=clock();
for(int j=(cvAdjustDepthImg.rows-1);j>=0;j--)//depthImage.rows
{
const uchar* mj=cvAdjustDepthImg.ptr<uchar>(j);
for(int i=(cvAdjustDepthImg.cols-1);i>=0;i--)//depthImage.cols
{
if(mj[i]<=GRAYTH)
{
uchar colorpixel=cvAdjustColorImg.at<uchar>(j,i);
bool reResult=false;
if(colorpixel<GRAYTH*5)
{
for(int k=1;k<REPAIRERANGE*3;k++)
{
reResult=pixelRepaire(i,j,k);
if(reResult)
break;
}
//go down
if(!reResult)
{
for(int k=1;k<=30;k++)
{
if((j+k)<440)
{
if(cvAdjustDepthImg.at<uchar>(j+k,i)>GRAYTH)
{
cvAdjustDepthImg.at<uchar>(j,i)=cvAdjustDepthImg.at<uchar>(j+k,i);
reResult=true;
break;
}
}
else
{
break;
}
}
}
//go up
if(!reResult)
{
for(int k=1;k<=30;k++)
{
if((j-k)>=0)
{
if(cvAdjustDepthImg.at<uchar>(j-k,i)>GRAYTH)
{
cvAdjustDepthImg.at<uchar>(j,i)=cvAdjustDepthImg.at<uchar>(j-k,i);
reResult=true;
break;
}
}
else
{
break;
}
}
}
}
else
{
for(int k=1;k<30;k++)
{
if((i+k)<590 && !reResult)
{
if(abs(cvAdjustColorImg.at<uchar>(j,i+k)-colorpixel)<=COLORTH && cvAdjustDepthImg.at<uchar>(j,i+k)>GRAYTH)
{
cvAdjustDepthImg.at<uchar>(j,i)=cvAdjustDepthImg.at<uchar>(j,i+k);
reResult=true;
}
}
else
{
break;
}
}
}
if(!reResult)
{
for(int k=1;k<REPAIRERANGE;k++)
{
reResult=pixelRepaire(i,j,k);
if(reResult)
break;
}
}
if(!reResult)
{
for(int k=0;k<REPAIRERANGE*3;k++)
{
reResult=rangeRepaire(i,j,k);
if(reResult)
break;
}
}
}
}
}
finish=clock();
totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
}

// Try to repair the depth pixel at (i,j) from the square ring at distance
// repaireRange: average the depths of ring pixels whose gray level is within
// COLORTH of the target and whose depth is valid (> GRAYTH).
bool pixelRepaire(int i,int j,int repaireRange)
{
uchar colorpixel=cvAdjustColorImg.at<uchar>(j,i);
int x=0;
int y=0;
int n=0;
int sum=0;
for(y=j-repaireRange;y<=j+repaireRange;y++)
{
if(y>=0 && y<440)
{
if(y==(j-repaireRange) || y==(j+repaireRange))
{
for(x=i-repaireRange;x<=i+repaireRange;x++)
{
if(x>=0 && x<590)
{
if(abs(cvAdjustColorImg.at<uchar>(y,x)-colorpixel)<=COLORTH && cvAdjustDepthImg.at<uchar>(y,x)>GRAYTH)
{
n++;
sum=sum+cvAdjustDepthImg.at<uchar>(y,x);
}
}
}
}
else
{
x=i-repaireRange;
if(x>=0 && x<590)
{
if(abs(cvAdjustColorImg.at<uchar>(y,x)-colorpixel)<=COLORTH && cvAdjustDepthImg.at<uchar>(y,x)>GRAYTH)
{
n++;
sum=sum+cvAdjustDepthImg.at<uchar>(y,x);
}
}
x=i+repaireRange;
if(x>=0 && x<590)
{
if(abs(cvAdjustColorImg.at<uchar>(y,x)-colorpixel)<=COLORTH && cvAdjustDepthImg.at<uchar>(y,x)>GRAYTH)
{
n++;
sum=sum+cvAdjustDepthImg.at<uchar>(y,x);
}
}
}
}
}
if(n<repaireRange*2)
{return false;}
else
{
cvAdjustDepthImg.at<uchar>(j,i)=(uchar)(sum/n);
return true;
}
}

// Fallback repair: average every valid depth value (> GRAYTH) inside the full
// square window of half-width repaireRange around (i,j), ignoring color.
bool rangeRepaire(int i,int j,int repaireRange)
{
uchar colorpixel=cvAdjustColorImg.at<uchar>(j,i);
int x=0;
int y=0;
int n=0;
int sum=0;
for(y=j-repaireRange;y<=j+repaireRange;y++)
{
if(y>=0 && y<440)
{
for(x=i-repaireRange;x<=i+repaireRange;x++)
{
if(x>=0 && x<590)
{
if(cvAdjustDepthImg.at<uchar>(y,x)>GRAYTH)
{
n++;
sum=sum+cvAdjustDepthImg.at<uchar>(y,x);
}
}
}
}
}
if(n<=repaireRange*2)
{
return false;
}
else
{
cvAdjustDepthImg.at<uchar>(j,i)=(uchar)(sum/n);
return true;
}
}

Source code

http://git.oschina.net/yuliyang/kinectRecorder
