V4L (Video4Linux) is the foundation of many video and audio applications on Linux and is used wherever images need to be captured, such as video surveillance, webcams, and video telephony; it is a system interface that comes up constantly in embedded Linux development. It is the programming interface the Linux kernel exposes to user space: once a driver has been written for a video or audio device, the device can be controlled through the system APIs that V4L provides. In other words, V4L has two layers: the lower layer is the in-kernel driver for the audio/video device, and the upper layer is the API exposed by the system. As application developers, what we work with are these system APIs.
V4L2 is the successor to V4L and defines a set of interface specifications for video devices under Linux, including a set of data structures and the underlying V4L2 driver interface. V4L2 works in a streaming fashion, which keeps the operations simple and intuitive: the basic flow is open the video device, set the format, process the data, and close the device; most of the concrete operations are carried out through the ioctl() call.

1. Open the device
  int open(const char *device_name, int flags);
  int fd = open("/dev/video0", O_RDONLY); // O_NONBLOCK: non-blocking mode (not recommended here)
2. Close the device
  int close(int fd);
  int ret = close(fd);
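A minimal sketch of the open/close pair with error checking; /dev/video0 is an assumed device node, and O_RDWR is used because the streaming (mmap) examples later need read/write access:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int open_camera(const char *dev)
{
    int fd = open(dev, O_RDWR);   /* e.g. "/dev/video0" */
    if (fd < 0) {
        perror("open video device");
        return -1;
    }
    return fd;
}
/* when done: close(fd); */
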
3. Query device properties (v4l2_capability)
  int ioctl(int fd, int request, struct v4l2_capability *argp); // request is VIDIOC_QUERYCAP
  struct v4l2_capability
  {
    u8 driver[16];     // driver name
    u8 card[32];       // device name
    u8 bus_info[32];   // location of the device in the system
    u32 version;       // driver version number
    u32 capabilities;  // operations the device supports
    u32 reserved[4];   // reserved fields
  };
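
A hedged sketch of step 3: query the capability structure with VIDIOC_QUERYCAP and make sure the device supports video capture and streaming I/O (fd is assumed to be a descriptor returned by open() above):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int check_capabilities(int fd)
{
    struct v4l2_capability cap;
    memset(&cap, 0, sizeof(cap));
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
        perror("VIDIOC_QUERYCAP");
        return -1;
    }
    printf("driver: %s, card: %s, bus: %s, version: %u\n",
           cap.driver, cap.card, cap.bus_info, cap.version);
    /* require both capture and streaming support */
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) ||
        !(cap.capabilities & V4L2_CAP_STREAMING)) {
        fprintf(stderr, "not a streaming capture device\n");
        return -1;
    }
    return 0;
}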

4. Set the video format and standard
Related functions:
int ioctl(int fd, int request, struct v4l2_fmtdesc *argp);
int ioctl(int fd, int request, struct v4l2_format *argp);

Related structures:
The v4l2_cropcap structure describes the camera's capture capabilities. Before capturing video, first set the type field of
v4l2_cropcap, then issue the VIDIOC_CROPCAP command to read the device's capture parameters into the structure, including bounds
(top-left corner, width, and height of the largest capture rectangle) and defrect (top-left corner, width, and height of the default capture rectangle).
The v4l2_format structure sets the camera's video standard, frame format, and so on. When setting these parameters, first fill in the fields of v4l2_format, such as type (stream type),
fmt.pix.width, fmt.pix.height, fmt.pix.field (field order, e.g. interlaced), and fmt.pix.pixelformat (pixel format, e.g. YUV 4:2:2),
then issue the VIDIOC_S_FMT command to set the video capture format; a combined example is given after the structure definitions below.
struct v4l2_fmtdesc
{
u32 index;                // index of the format to query, set by the application
enum v4l2_buf_type type;  // buffer type, set by the application
u32 flags;                // whether the format is compressed
u8 description[32];       // format name
u32 pixelformat;          // format (fourcc code)
u32 reserved[4];          // reserved
};
All pixel formats are fourcc codes built with the macro below, and they can be enumerated as follows:
#define v4l2_fourcc(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((d) << 24))
Enumerate all formats: VIDIOC_ENUM_FMT with struct v4l2_fmtdesc
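
A sketch of enumerating every format the driver reports (same headers as the previous sketch): keep incrementing index until VIDIOC_ENUM_FMT fails, which happens when the index runs past the last supported format:

static void enum_formats(int fd)
{
    struct v4l2_fmtdesc desc;
    memset(&desc, 0, sizeof(desc));
    desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {   /* stops once index is out of range */
        printf("format %u: %s (fourcc 0x%08x)%s\n",
               desc.index, desc.description, desc.pixelformat,
               (desc.flags & V4L2_FMT_FLAG_COMPRESSED) ? " [compressed]" : "");
        desc.index++;
    }
}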

struct v4l2_format
{
enum v4l2_buf_type type;  // buffer type, set by the application
union
{
struct v4l2_pix_format pix;           // used by video capture devices
struct v4l2_window win;               // video overlay
struct v4l2_vbi_format vbi;           // raw VBI capture
struct v4l2_sliced_vbi_format sliced; // sliced VBI capture
u8 raw_data[200];
} fmt;
};

struct v4l2_pix_format
{
u32 width;                        // frame width in pixels
u32 height;                       // frame height in pixels
u32 pixelformat;                  // pixel format (fourcc code)
enum v4l2_field field;            // field order, e.g. interlaced or progressive
u32 bytesperline;                 // bytes per line (stride), filled in by the driver
u32 sizeimage;                    // size in bytes of one frame
enum v4l2_colorspace colorspace;  // colorspace
u32 priv;                         // private data, depends on the pixel format
};
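
Putting step 4 together, a sketch that resets the crop rectangle to the driver default and then asks for a YUYV capture format; the driver is free to adjust the requested values, so the negotiated width, height, and sizeimage should be read back from fmt after VIDIOC_S_FMT (headers as above):

static int set_format(int fd, unsigned int width, unsigned int height)
{
    struct v4l2_cropcap cropcap;
    struct v4l2_crop crop;
    struct v4l2_format fmt;

    memset(&cropcap, 0, sizeof(cropcap));
    cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(fd, VIDIOC_CROPCAP, &cropcap) == 0) {
        crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        crop.c = cropcap.defrect;          /* reset to the default rectangle */
        ioctl(fd, VIDIOC_S_CROP, &crop);   /* ignore errors: cropping is optional */
    }

    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
    if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
        perror("VIDIOC_S_FMT");
        return -1;
    }
    /* the driver may have changed the values; use what it actually accepted */
    printf("negotiated %ux%u, %u bytes per frame\n",
           fmt.fmt.pix.width, fmt.fmt.pix.height, fmt.fmt.pix.sizeimage);
    return 0;
}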

5. Query and set the frame rate
int ioctl(int fd, int request, struct v4l2_streamparm *argp);
struct v4l2_streamparm parm;
parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
VIDIOC_G_PARM reads the current frame rate and VIDIOC_S_PARM sets it, as sketched below.
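
A sketch of step 5: VIDIOC_G_PARM reads the current streaming parameters and VIDIOC_S_PARM requests a new frame rate; timeperframe is a fraction, so numerator 1 and denominator 30 asks for 30 fps (drivers that cannot change the rate simply keep their own value):

static int set_frame_rate(int fd, unsigned int fps)
{
    struct v4l2_streamparm parm;
    memset(&parm, 0, sizeof(parm));
    parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(fd, VIDIOC_G_PARM, &parm) < 0) {   /* read current parameters */
        perror("VIDIOC_G_PARM");
        return -1;
    }
    parm.parm.capture.timeperframe.numerator = 1;
    parm.parm.capture.timeperframe.denominator = fps;
    if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0) {   /* request the new rate */
        perror("VIDIOC_S_PARM");
        return -1;
    }
    return 0;
}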

6. Custom ioctl commands
All ioctl command codes are listed in kernel/include/linux/videodev2.h in the kernel source tree, and you can add your own there, for example:
/*
* Experimental, third param 0--video, 1--tracking
*/
#define VIDIOC_POCCESS_NOTIFY _IOW('V', 99, int) //add by Henry.Wen 20131126
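
Assuming VIDIOC_POCCESS_NOTIFY has been added to videodev2.h and implemented in the driver as above (it is a hypothetical, driver-specific command, not part of stock V4L2), user space would invoke it like any other ioctl:

int mode = 1;   /* 0 -- video, 1 -- tracking, per the comment above */
if (ioctl(fd, VIDIOC_POCCESS_NOTIFY, &mode) < 0)
    perror("VIDIOC_POCCESS_NOTIFY");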

Example:

  V4l2Camera.h

#ifndef CAMERA_V4L2CAMERA_H
#define CAMERA_V4L2CAMERA_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <getopt.h> /* getopt_long() */
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <errno.h>
#include <malloc.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <asm/types.h> /* for videodev2.h */
#include <linux/videodev2.h>

#define CONFIG_CAMERA_UVC_INVAL_FRAMECNT 5

namespace v4l2
{
/**
 * return error code
 */
enum _RET_ERROR_CODE
{
    RET_ERROR_FAIL = -1,
    RET_ERROR_OK = 0,
    RET_ERROR_CAPTURE_NULL = -9999,
    RET_ERROR_CAPTURE_NAME,
    RET_ERROR_CAPTURE_CAPABILITY,
    RET_ERROR_CAPTURE_FORMAT,
    RET_ERROR_CAPTURE_BUFFER,
    RET_ERROR_CAPTURE_OUTMEMORY,
    RET_ERROR_CAPTURE_MMAP,
    RET_ERROR_CAPTURE_FORM,
    RET_ERROR_CAPTURE_MUMAP,
    RET_ERROR_CAPTURE_VIDIOC,
};

/**
 * Name: video_format enum
 * Function: Describe formats V4L2 will support
 */
typedef enum _pFormat
{
    UNKNOWN,
    YUYV,
    MJPEG,
    YV12,
    YU12,
    NV12,
    NV21,
    H264,
} pFormat;

/**
 * frame width and height information
 */
typedef struct _V4l2Info
{
    unsigned int width;
    unsigned int height;
    unsigned int stepWidth;
    unsigned int length;
    void* buffer;
} V4l2Info;

/**
 * capture properties
 */
typedef struct _V4l2Capture
{
    pFormat format;
    char name[31]; // dev_name
    int fd;
    unsigned int rate;
    unsigned int quality;
    unsigned int brightness;
    V4l2Info v4l2Info;
} V4l2Capture;

/**
 *
 */
class V4l2Camera
{
public:
    V4l2Camera();
    virtual ~V4l2Camera();

public:
    /**
     * get the number of cameras
     *
     * @return the number of cameras
     */
    static int getNumberOfCameras();

    /**
     * initialize v4l2 device
     * @param capture v4l2 capture handle
     * @param format pixel format
     * @param name device node name
     * @param rate frame rate
     * @param width frame width
     * @param height frame height
     *
     * @return 0 on success, other on failure
     */
    int InitDevice(V4l2Capture *capture, pFormat format, const char* name, unsigned int rate, unsigned int width, unsigned int height);

    /**
     * uninitialize v4l2 device
     * @param capture v4l2 capture handle
     *
     * @return 0 on success, other on failure
     */
    int UninitDevice(V4l2Capture *capture);

    /**
     * set v4l2 device brightness
     * @param capture v4l2 capture handle
     * @param value brightness value
     *
     * @return 0 on success, other on failure
     */
    int SetBrightness(V4l2Capture *capture, unsigned int value);

    /**
     * start v4l2 device
     * @param fd v4l2 device file descriptor
     *
     * @return 0 on success, other on failure
     */
    int StartDevice(int fd);

    /**
     * stop v4l2 device
     * @param fd v4l2 device file descriptor
     *
     * @return 0 on success, other on failure
     */
    int StopDevice(int fd);

    /**
     * get frame data
     * @param capture v4l2 capture handle
     *
     * @return 0 on success, other on failure
     */
    int GetFrame(V4l2Capture *capture);

private:
    int InitMmap(int fd);
    int xioctl(int fd, int request, void *arg);
    unsigned int GetCameraFormat(pFormat format);
    int AdjustV4l2Info(unsigned int& width, unsigned int& height);
    int MSleep(int fd, unsigned int msec);
    int MatchCameraAuto(int cameraId);

private:
    typedef struct _Buffers
    {
        void *start;
        size_t length;
    } V4l2Buffers;

    int m_stime;
    V4l2Buffers* m_buffers;
    unsigned int m_nBuffers;
    V4l2Capture* m_capture;

    static int mNumberOfCameras;
    static int mCameraIndex[10];
    int mUsbCameraIvalidFrameCnt;
    bool m_InitDevice;
};
} // end namespace
#endif // CAMERA_V4L2CAMERA_H

  V4l2Camera.cpp

#include "V4l2Camera.h"

namespace v4l2
{
#define BUFFERS_COUNT 4
#define ARRAY_LEN(a) (sizeof(a) / sizeof(a[0]))
#define MEMST_VALUE(x) memset(&(x), 0, sizeof(x))

const V4l2Info g_v4l2Info[] = {{160, 120}, {320, 240}, {640, 480}, {1024, 768}, {1200, 900}, {1440, 1080}, {1600, 900}, {1600, 1200}};

V4l2Camera::V4l2Camera()
{
    // TODO Auto-generated constructor stub
    m_capture = NULL;
    m_buffers = NULL;
    m_nBuffers = 0;
    m_stime = 0;
    mUsbCameraIvalidFrameCnt = 0;
    m_InitDevice = false;
}

V4l2Camera::~V4l2Camera()
{
    // TODO Auto-generated destructor stub
    if (m_capture)
        UninitDevice(m_capture);
}

int V4l2Camera::xioctl(int fd, int request, void *arg)
{
    int r;
    int nCount = 0;
    do
    {
        r = ioctl(fd, request, arg);
    } while (RET_ERROR_FAIL == r && (EINTR == errno) && (++nCount < 100));
    return r;
}

int V4l2Camera::mNumberOfCameras = 0;
int V4l2Camera::mCameraIndex[] = {0};

unsigned int V4l2Camera::GetCameraFormat(pFormat format)
{
    unsigned int ret = UNKNOWN;
    switch (format)
    {
    case YUYV:
        ret = V4L2_PIX_FMT_YUYV;
        break;
    case MJPEG:
        ret = V4L2_PIX_FMT_MJPEG;
        break;
    case YV12:
        ret = V4L2_PIX_FMT_YVU420;
        break;
    case YU12:
        ret = V4L2_PIX_FMT_YUV420;
        break;
    case NV12:
        ret = V4L2_PIX_FMT_NV12;
        break;
    case NV21:
        ret = V4L2_PIX_FMT_NV21;
        break;
    case H264:
        ret = V4L2_PIX_FMT_MPEG;
        break;
    default:
        break;
    }
    return ret;
}

int V4l2Camera::InitMmap(int fd)
{
    int ret = RET_ERROR_OK;
    struct v4l2_requestbuffers req;
    MEMST_VALUE(req); // reserved fields must be zeroed before VIDIOC_REQBUFS
    req.count = BUFFERS_COUNT;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;

    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_REQBUFS, &req) || req.count < 2)
    {
        return RET_ERROR_CAPTURE_BUFFER;
    }

    m_buffers = (V4l2Buffers*)calloc(req.count, sizeof(V4l2Buffers));
    if (!m_buffers)
    {
        return RET_ERROR_CAPTURE_OUTMEMORY;
    }

    for (m_nBuffers = 0; m_nBuffers < req.count; ++m_nBuffers)
    {
        struct v4l2_buffer buf;
        MEMST_VALUE(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = m_nBuffers;

        if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_QUERYBUF, &buf))
        {
            ret = RET_ERROR_CAPTURE_BUFFER;
            break;
        }

        m_buffers[m_nBuffers].length = buf.length;
        m_buffers[m_nBuffers].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset);
        if (MAP_FAILED == m_buffers[m_nBuffers].start)
        {
            ret = RET_ERROR_CAPTURE_MMAP;
            break;
        }
    }
    return ret;
}

int V4l2Camera::AdjustV4l2Info(unsigned int& width, unsigned int& height)
{
    int ret = RET_ERROR_FAIL;
    int index = 0;
    // walk the supported-size table from largest to smallest and snap to the closest entry
    for (int nCount = ARRAY_LEN(g_v4l2Info) - 1, i = nCount; i >= 0; --i)
    {
        if (width <= (g_v4l2Info[i].width + 50))
        {
            index = i;
            ret = RET_ERROR_OK;
        }
        else if (0 != nCount)
        {
            width = g_v4l2Info[index].width;
            height = g_v4l2Info[index].height;
            break;
        }
    }
    return ret;
}

int V4l2Camera::getNumberOfCameras()
{
    char cam_path[20];
    int fd = -1, i = 0;
    struct v4l2_capability capability;

    mNumberOfCameras = 0;
    memset(mCameraIndex, 0x00, sizeof(mCameraIndex));
    for (i = 0; i < 10; ++i)
    {
        memset(cam_path, 0x00, 20);
        sprintf(cam_path, "/dev/video%d", i);
        fd = open(cam_path, O_RDONLY);
        if (fd < 0)
            continue;

        memset(&capability, 0, sizeof(struct v4l2_capability));
        if (ioctl(fd, VIDIOC_QUERYCAP, &capability) < 0)
        {
            //LOGE("Video device(%s): query capability not supported.\n", cam_path);
            goto loop_continue;
        }

        if ((capability.capabilities & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING))
            == (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING))
        {
            mCameraIndex[mNumberOfCameras] = i;
            mNumberOfCameras++;
        }

loop_continue:
        if (fd >= 0)
        {
            close(fd);
            fd = -1;
        }
    }
    return mNumberOfCameras;
}

int V4l2Camera::MatchCameraAuto(int cameraId)
{
    if (mNumberOfCameras <= 0 || mNumberOfCameras > 10 || cameraId < 0)
    {
        return -1;
    }

    // search for camera ID normally
    for (int i = 0; i < mNumberOfCameras; ++i)
    {
        if (cameraId == mCameraIndex[i])
        {
            return cameraId;
        }
    }

    if (mNumberOfCameras == 1)
    {
        return mCameraIndex[0];
    }

    return cameraId > mCameraIndex[mNumberOfCameras - 1] ? mCameraIndex[mNumberOfCameras - 1] : mCameraIndex[0];
}

int V4l2Camera::InitDevice(V4l2Capture *capture, pFormat format, const char* name, unsigned int rate, unsigned int width, unsigned int height)
{
    // TODO Auto-generated function stub
    struct stat st;
    int fd = -1, nRealCameraNameLen = 0, nMinCameraNameLen = 0;
    int ret = RET_ERROR_OK;
    char szcameraID[4], szCameraName[20];
    int cameraId = 0;
    struct v4l2_capability cap;
    struct v4l2_cropcap cropcap;
    struct v4l2_crop crop;
    struct v4l2_format fmt;
    struct v4l2_streamparm parm;

    if (NULL == capture || NULL == name || 0 == rate || 0 == width || 0 == height) {
        ret = RET_ERROR_CAPTURE_NULL;
        goto InitDeviceFAILED;
    }
    if ((nRealCameraNameLen = strlen(name)) < (nMinCameraNameLen = strlen("/dev/video0"))) {
        ret = RET_ERROR_CAPTURE_NULL;
        goto InitDeviceFAILED;
    }

    // Get camera ID from the trailing digits of the device name
    memset(szcameraID, 0x00, 4);
    for (int i = 0; i < 3; i++) {
        if (nRealCameraNameLen >= (nMinCameraNameLen + i))
            szcameraID[i] = name[nMinCameraNameLen - 1 + i];
    }
    cameraId = atoi(szcameraID);

    mNumberOfCameras = 0;
    memset(mCameraIndex, 0x00, sizeof(mCameraIndex));
    if (0 == getNumberOfCameras()) {
        //LOGE("There is NO camera!");
        ret = RET_ERROR_CAPTURE_NAME;
        goto InitDeviceFAILED;
    }
    if (-1 == (cameraId = MatchCameraAuto(cameraId))) {
        //LOGE("There is NO camera!");
        ret = RET_ERROR_CAPTURE_NAME;
        goto InitDeviceFAILED;
    }

    memset(szCameraName, 0x00, 20);
    sprintf(szCameraName, "/dev/video%d", cameraId);
    //LOGI("camera name is %s.", name);

    if ((RET_ERROR_FAIL == stat(szCameraName, &st)) || (!S_ISCHR(st.st_mode))
        || (RET_ERROR_FAIL == (fd = open(szCameraName, O_RDWR | O_NONBLOCK, 0))))
    {
        ret = RET_ERROR_CAPTURE_NAME;
        goto InitDeviceFAILED;
    }

    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_QUERYCAP, &cap) || !(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)
        || !(cap.capabilities & V4L2_CAP_STREAMING))
    {
        ret = RET_ERROR_CAPTURE_CAPABILITY;
        goto InitDeviceFAILED;
    }

    MEMST_VALUE(cropcap);
    cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (RET_ERROR_OK == xioctl(fd, VIDIOC_CROPCAP, &cropcap))
    {
        crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        crop.c = cropcap.defrect; /* reset to default */
        xioctl(fd, VIDIOC_S_CROP, &crop);
    }

    MEMST_VALUE(fmt);
    ret = GetCameraFormat(format);
    if (UNKNOWN == ret) {
        ret = RET_ERROR_CAPTURE_FORMAT; // go through the failure path so the fd is closed
        goto InitDeviceFAILED;
    }
    if (RET_ERROR_OK != AdjustV4l2Info(width, height))
    {
        ret = RET_ERROR_CAPTURE_FORM;
        goto InitDeviceFAILED;
    }

    //SetBrightness(capture, capture->brightness);
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    fmt.fmt.pix.pixelformat = ret;
    fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
    printf("%s(%d) pixel format is %d\n", __FUNCTION__, __LINE__, ret);
    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_S_FMT, &fmt)) {
        ret = RET_ERROR_CAPTURE_FORMAT;
        goto InitDeviceFAILED;
    }
    if (RET_ERROR_OK != (ret = InitMmap(fd))) { // || RET_ERROR_OK != (ret = SetBrightness(capture, capture->brightness)))
        goto InitDeviceFAILED;
    }

    // set video frame rate
    MEMST_VALUE(parm);
    parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (RET_ERROR_OK != ioctl(fd, VIDIOC_G_PARM, &parm)) {
        //LOGI("VIDIOC_G_PARM fail....");
    }
    parm.parm.capture.timeperframe.numerator = 1;
    parm.parm.capture.timeperframe.denominator = rate;
    if (RET_ERROR_OK != ioctl(fd, VIDIOC_S_PARM, &parm)) {
        //LOGI("VIDIOC_S_PARM Fail....");
    }

    // check setting of frame rate (do not store this result in ret)
    memset(&parm, 0x00, sizeof(v4l2_streamparm));
    parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (RET_ERROR_OK != ioctl(fd, VIDIOC_G_PARM, &parm)) {
        //LOGI("VIDIOC_G_PARM fail....");
    }

    capture->fd = fd;
    capture->v4l2Info.width = fmt.fmt.pix.width;
    capture->v4l2Info.height = fmt.fmt.pix.height;
    capture->format = format;
    capture->rate = rate;
    capture->v4l2Info.length = fmt.fmt.pix.sizeimage;
    capture->v4l2Info.stepWidth = fmt.fmt.pix.bytesperline;
    capture->v4l2Info.buffer = malloc(fmt.fmt.pix.sizeimage);
    strncpy(capture->name, name, sizeof(capture->name));

    m_stime = 1000 / rate;
    m_capture = capture;
    m_InitDevice = true;
    return RET_ERROR_OK;

InitDeviceFAILED:
    if (fd >= 0)
    {
        close(fd);
        fd = -1;
    }
    return ret;
}

int V4l2Camera::UninitDevice(V4l2Capture *capture)
{
    // TODO Auto-generated function stub
    printf("%s(%d)...[BEGIN]\n", __FUNCTION__, __LINE__);
    int ret = RET_ERROR_OK;
    if (!m_InitDevice) {
        return ret;
    } else {
        m_InitDevice = false;
    }
    if (m_buffers) {
        for (unsigned int i = 0; i < m_nBuffers; ++i) {
            printf("%s(%d) munmap() i = %u\n", __FUNCTION__, __LINE__, i);
            if (RET_ERROR_FAIL == munmap(m_buffers[i].start, m_buffers[i].length)) {
                ret = RET_ERROR_CAPTURE_MUMAP;
                break;
            }
        }
        if (RET_ERROR_OK == ret) {
            printf("%s(%d) free(m_buffers)\n", __FUNCTION__, __LINE__);
            free(m_buffers);
        }
        m_buffers = NULL;
        if (capture) {
            if (capture->v4l2Info.buffer) {
                printf("%s(%d) free(capture->v4l2Info.buffer)\n", __FUNCTION__, __LINE__);
                free(capture->v4l2Info.buffer);
                capture->v4l2Info.buffer = NULL;
            }
            if (capture->fd >= 0) {
                printf("%s(%d) close(capture->fd)\n", __FUNCTION__, __LINE__);
                ret = close(capture->fd);
            }
        }
    }
    printf("%s(%d)...[END]\n", __FUNCTION__, __LINE__);
    return ret;
}

int V4l2Camera::SetBrightness(V4l2Capture *capture, unsigned int value)
{
    // TODO Auto-generated function stub
    if (!capture || value > 10000)
        return RET_ERROR_FAIL;

    struct v4l2_control control;
    control.id = V4L2_CID_BRIGHTNESS;
    control.value = value; // use the requested brightness
    if (RET_ERROR_FAIL == xioctl(capture->fd, VIDIOC_S_CTRL, &control))
    {
        return RET_ERROR_FAIL;
    }
    capture->brightness = control.value;
    return RET_ERROR_OK;
}

int V4l2Camera::StartDevice(int fd)
{
    // TODO Auto-generated function stub
    if (fd < 0)
        return RET_ERROR_FAIL;

    int ret = RET_ERROR_OK;
    for (unsigned int i = 0; i < m_nBuffers; ++i)
    {
        struct v4l2_buffer buf;
        MEMST_VALUE(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;

        if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_QBUF, &buf))
        {
            ret = RET_ERROR_CAPTURE_VIDIOC;
            break;
        }
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_STREAMON, &type))
        ret = RET_ERROR_CAPTURE_VIDIOC;

    return ret;
}

int V4l2Camera::StopDevice(int fd)
{
    // TODO Auto-generated function stub
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_STREAMOFF, &type))
    {
        return RET_ERROR_FAIL;
    }
    return 0;
}

int V4l2Camera::MSleep(int fd, unsigned int msec)
{
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd, &fds);

    struct timeval tv;
    tv.tv_sec = msec / 1000;             // msec is in milliseconds
    tv.tv_usec = (msec % 1000) * 1000;
    return select(fd + 1, &fds, NULL, NULL, &tv);
}

int V4l2Camera::GetFrame(V4l2Capture *capture)
{
    // TODO Auto-generated function stub
    if (!capture)
        return RET_ERROR_FAIL;

    int fd = capture->fd;
    if (RET_ERROR_FAIL == MSleep(capture->fd, m_stime))
        return RET_ERROR_FAIL;

    struct v4l2_buffer buf;
    MEMST_VALUE(buf);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.reserved = 0;

    // Skip the first CONFIG_CAMERA_UVC_INVAL_FRAMECNT video frames
    // because they are possibly invalid
    if (mUsbCameraIvalidFrameCnt < CONFIG_CAMERA_UVC_INVAL_FRAMECNT) {
        mUsbCameraIvalidFrameCnt++;
        if (xioctl(fd, VIDIOC_DQBUF, &buf) >= 0) {
            xioctl(fd, VIDIOC_QBUF, &buf);
        }
        return RET_ERROR_OK;
    }

    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_DQBUF, &buf) || buf.index >= m_nBuffers)
    {
        return RET_ERROR_FAIL;
    }

    memcpy(capture->v4l2Info.buffer, m_buffers[buf.index].start, buf.bytesused);

    if (RET_ERROR_FAIL == xioctl(fd, VIDIOC_QBUF, &buf))
        return RET_ERROR_FAIL;

    return RET_ERROR_OK;
}
} // end namespace
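
A hedged usage sketch for the V4l2Camera class above: the call sequence follows the public methods (init, start, grab frames, stop, uninit); the device path, 30 fps, and 640x480 are example parameters only, and error handling is abbreviated:

#include "V4l2Camera.h"

int main()
{
    v4l2::V4l2Capture capture;
    memset(&capture, 0, sizeof(capture));

    if (v4l2::V4l2Camera::getNumberOfCameras() <= 0)
        return -1;

    v4l2::V4l2Camera camera;
    if (v4l2::RET_ERROR_OK != camera.InitDevice(&capture, v4l2::YUYV, "/dev/video0", 30, 640, 480))
        return -1;
    if (v4l2::RET_ERROR_OK != camera.StartDevice(capture.fd))
        return -1;

    for (int i = 0; i < 100; ++i) {
        /* the first CONFIG_CAMERA_UVC_INVAL_FRAMECNT calls only warm the camera up */
        if (v4l2::RET_ERROR_OK == camera.GetFrame(&capture)) {
            /* capture.v4l2Info.buffer now holds up to capture.v4l2Info.length bytes
               of raw frame data; process or save it here */
        }
    }

    camera.StopDevice(capture.fd);
    camera.UninitDevice(&capture);
    return 0;
}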

  
