A Code-Level Analysis of FFmpeg Filters
http://blog.chinaunix.net/uid-26000296-id-3322071.html
The previous article, "Adding a Custom Filter to FFmpeg", described the steps for adding a filter in detail and gave a code example.
This article uses FFmpeg's built-in deinterlacing filter yadif ("yet another deinterlacing filter") as an example to analyze
how FFmpeg's filter mechanism is implemented at the code level.
Overall, FFmpeg's filter mechanism is very similar to Microsoft DirectShow (whether one borrowed from the other, or the
designers simply arrived at the same ideas, is hard to say):
Object model: FFmpeg implements its own unified object model, while DShow builds on COM;
Framework: both manage filters through a filter graph;
Connections: FFmpeg uses the AVFilterPad structure where DShow uses Pins, and in both cases the connection point is also where buffers are allocated and formats are negotiated (a minimal graph-building sketch follows this list).
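To make the analogy concrete, below is a minimal sketch of building such a graph by hand with the libavfilter API of that period. It is an illustration only: the instance names, the buffer_args string and the use of a "nullsink" (ffmpeg.c itself uses a buffer sink it can read frames back from) are assumptions, and error handling is reduced to early returns.

/* Sketch: decoded frames -> "buffer" source -> yadif -> null sink.
 * avfilter_register_all() is assumed to have been called already. */
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>

static int build_graph(AVFilterGraph **pgraph, AVFilterContext **src,
                       AVFilterContext **sink, const char *buffer_args)
{
    AVFilterGraph   *graph = avfilter_graph_alloc();
    AVFilterContext *yadif = NULL;
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);

    /* the role DShow's IGraphBuilder plays: create filter instances inside a graph */
    if ((ret = avfilter_graph_create_filter(src, avfilter_get_by_name("buffer"),
                                            "src", buffer_args, NULL, graph)) < 0 ||
        (ret = avfilter_graph_create_filter(&yadif, avfilter_get_by_name("yadif"),
                                            "deint", "0:-1", NULL, graph)) < 0 ||
        (ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("nullsink"),
                                            "out", NULL, NULL, graph)) < 0)
        return ret;

    /* pad-to-pad connection, the counterpart of connecting DShow pins */
    if ((ret = avfilter_link(*src, 0, yadif, 0)) < 0 ||
        (ret = avfilter_link(yadif, 0, *sink, 0)) < 0)
        return ret;

    /* format negotiation and link configuration (analyzed in step 2 below) */
    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;

    *pgraph = graph;
    return 0;
}

The buffer_args string describes the incoming frames (size, pixel format, time base, aspect ratio); its shape is sketched after configure_video_filters() below.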
The code is analyzed below, layer by layer.
Pseudocode for FFmpeg's transcoding layer is shown first; the filter-related calls to note are configure_video_filters() and output_packet():
int transcode(AVFormatContext **output_files,
              int nb_output_files,
              AVInputFile *input_files,
              int nb_input_files,
              AVStreamMap *stream_maps,
              int nb_stream_maps)
{
    ...
    /* compute decoding parameters */
#if CONFIG_AVFILTER
    if (configure_video_filters(ist, ost)) {
        fprintf(stderr, "Error opening filters!\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
#endif
    /* compute encoding parameters */
    output_packet(ist, ist_index, ost_table,
                  nb_ostreams, &pkt, &errorflag);
    ...
    return ret;
}
configure_video_filters() initializes the filter graph and the individual filters;
output_packet() drives FFmpeg's whole decode, filter, encode cycle.
1. First, the implementation of configure_video_filters():
int configure_video_filters(AVInputStream *ist,
                            AVOutputStream *ost)
{
    /* allocate the filter graph, much like creating a graph in DirectShow */
    ost->graph = avfilter_graph_alloc();
    ...
    /* build the chain described by the user's filter string */
    avfilter_graph_parse(ost->graph, ost->avfilter,
                         &inputs, &outputs, NULL);
    /* negotiate the media formats supported by the filters */
    avfilter_graph_config(ost->graph, NULL);
    ...
    return 0;
}
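For context, the endpoints of the chain are a "buffer" source fed with the input stream's parameters and a sink the encoder reads from. Below is a hedged sketch of how the source is typically created in the ffmpeg.c of that generation; treat the exact argument format string as an assumption rather than a quotation:

char args[256];

/* width:height:pix_fmt:time_base.num:time_base.den:sar.num:sar.den */
snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d",
         ist->st->codec->width, ist->st->codec->height,
         ist->st->codec->pix_fmt,
         ist->st->codec->time_base.num, ist->st->codec->time_base.den,
         ist->st->sample_aspect_ratio.num, ist->st->sample_aspect_ratio.den);

ret = avfilter_graph_create_filter(&ost->input_video_filter,
                                   avfilter_get_by_name("buffer"),
                                   "src", args, NULL, ost->graph);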
Initialization of the filter graph and of each filter:
int avfilter_graph_parse(AVFilterGraph *graph,
                         const char *filters,
                         AVFilterInOut **open_inputs,
                         AVFilterInOut **open_outputs,
                         void *log_ctx)
{
    do {
        parse_inputs(&filters, &curr_inputs, open_outputs, log_ctx);
        parse_filter(&filter, &filters, graph, index, log_ctx);
        if (filter->input_count == 1 && !curr_inputs && !index) {
            /* First input can be omitted if it is "[in]" */
            const char *tmp = "[in]";
            if ((ret = parse_inputs(&tmp, &curr_inputs, open_outputs,
                                    log_ctx)) < 0)
                goto fail;
        }
        link_filter_inouts(filter, &curr_inputs, open_inputs, log_ctx);
        parse_outputs(&filters, &curr_inputs, open_inputs,
                      open_outputs, log_ctx);
        filters += strspn(filters, WHITESPACES);
        chr = *filters++;
        index++;
    } while (chr == ',' || chr == ';');

    if (open_inputs && *open_inputs &&
        !strcmp((*open_inputs)->name, "out") && curr_inputs) {
        /* Last output can be omitted if it is "[out]" */
        const char *tmp = "[out]";
        if ((ret = parse_outputs(&tmp, &curr_inputs, open_inputs,
                                 open_outputs, log_ctx)) < 0)
            goto fail;
    }
    return 0;
}
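To make the parsing loop concrete, here is a trace (an illustration, not quoted from the sources) of how a description such as "[in]yadif=0:-1[out]" is consumed; the option string "0:-1" is only an example:

    parse_inputs()       consumes "[in]"        -> picks up the open output labeled "in"
    parse_filter()       consumes "yadif=0:-1"  -> create_filter() instantiates the yadif filter
    link_filter_inouts()                        -> wires the pending inputs to yadif's input pads
    parse_outputs()      consumes "[out]"       -> leaves yadif's output open under the label "out"

If the user writes just "yadif=0:-1", the two special cases in the loop supply "[in]" and "[out]" implicitly. parse_filter() splits each "name=options" token and delegates to create_filter():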
static int parse_filter(AVFilterContext **filt_ctx,
                        const char **buf, AVFilterGraph *graph,
                        int index, void *log_ctx)
{
    char *opts = NULL;
    char *name = av_get_token(buf, "=,;[\n");
    int ret;

    if (**buf == '=') {
        (*buf)++;
        opts = av_get_token(buf, "[],;\n");
    }

    ret = create_filter(filt_ctx, graph, index, name, opts, log_ctx);
    av_free(name);
    av_free(opts);
    return ret;
}
This is where the filter is actually instantiated and inserted into the graph, which again looks a lot like DShow:
int create_filter(AVFilterContext **filt_ctx,
                  AVFilterGraph *ctx, int index,
                  const char *filt_name, const char *args,
                  void *log_ctx)
{
    AVFilter *filt;
    char inst_name[30];
    char tmp_args[256];
    int ret;

    /* look up the filter in the static array of registered filters */
    filt = avfilter_get_by_name(filt_name);
    /* allocate the filter context and its input/output pads */
    ret = avfilter_open(filt_ctx, filt, inst_name);
    /* add the new filter context to the filter graph */
    ret = avfilter_graph_add_filter(ctx, *filt_ctx);
    /* invoke the filter's own init() through its function pointer */
    ret = avfilter_init_filter(*filt_ctx, args, NULL);
    return 0;
}
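The lookup in avfilter_get_by_name() only works because every built-in filter was registered into that static array beforehand, normally by avfilter_register_all(). A two-line sketch (the explicit registration call is shown only for illustration):

extern AVFilter avfilter_vf_yadif;                 /* defined in vf_yadif.c */

avfilter_register(&avfilter_vf_yadif);             /* done for every filter by avfilter_register_all() */
AVFilter *yadif = avfilter_get_by_name("yadif");   /* returns the registered descriptor */

avfilter_init_filter() itself is just a thin dispatcher: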
int avfilter_init_filter(AVFilterContext *filter,
                         const char *args,
                         void *opaque)
{
    int ret = 0;

    if (filter->filter->init)
        ret = filter->filter->init(filter, args, opaque);
    return ret;
}
static av_cold int init(AVFilterContext *ctx,
                        const char *args,
                        void *opaque)
{
    YADIFContext *yadif = ctx->priv;
    av_unused int cpu_flags = av_get_cpu_flags();

    yadif->mode   = 0;
    yadif->parity = -1;
    yadif->csp    = NULL;
    /* assign the per-line filtering function pointer */
    yadif->filter_line = filter_line_c;
    return 0;
}
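How does avfilter_init_filter() find this init()? Each filter exports an AVFilter descriptor whose callbacks the framework dispatches through; filter->filter->init above is exactly that field. The sketch below shows the general shape of yadif's descriptor in that era. It is abridged and the exact field list is an assumption; start_frame/end_frame are not quoted in this post, while init, query_formats, get_video_buffer, poll_frame and request_frame are the functions analyzed here:

AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = "Deinterlace the input image",
    .priv_size     = sizeof(YADIFContext),
    .init          = init,              /* reached via avfilter_init_filter()      */
    .query_formats = query_formats,     /* used during format negotiation (step 2) */

    .inputs  = (AVFilterPad[]) {{ .name             = "default",
                                  .type             = AVMEDIA_TYPE_VIDEO,
                                  .start_frame      = start_frame,
                                  .get_video_buffer = get_video_buffer,
                                  .end_frame        = end_frame, },
                                { .name = NULL }},
    .outputs = (AVFilterPad[]) {{ .name          = "default",
                                  .type          = AVMEDIA_TYPE_VIDEO,
                                  .poll_frame    = poll_frame,        /* see step 3 */
                                  .request_frame = request_frame, },
                                { .name = NULL }},
};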
2. Next, how media types are negotiated when two filters' pads are connected
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
{
    int ret;

    if ((ret = ff_avfilter_graph_check_validity(graphctx, log_ctx)))
        return ret;
    /* negotiate the media formats supported by each pad */
    if ((ret = ff_avfilter_graph_config_formats(graphctx, log_ctx)))
        return ret;
    if ((ret = ff_avfilter_graph_config_links(graphctx, log_ctx)))
        return ret;
    return 0;
}
Checking the media formats supported by the pads:
int ff_avfilter_graph_config_formats(AVFilterGraph *graph,
                                     AVClass *log_ctx)
{
    int ret;

    /* find supported formats from sub-filters, and merge along links */
    if ((ret = query_formats(graph, log_ctx)) < 0)
        return ret;

    /* Once everything is merged, it's possible that we'll still have
     * multiple valid media format choices. We pick the first one. */
    pick_formats(graph);
    return 0;
}
static int query_formats(AVFilterGraph *graph,
                         AVClass *log_ctx)
{
    int i, j, ret;
    int scaler_count = 0;
    char inst_name[30];

    /* ask all the sub-filters for their supported media formats */
    for (i = 0; i < graph->filter_count; i++) {
        if (graph->filters[i]->filter->query_formats)
            graph->filters[i]->filter->query_formats(graph->filters[i]);
        else
            avfilter_default_query_formats(graph->filters[i]);
    }

    /* go through and merge as many format lists as possible */
    for (i = 0; i < graph->filter_count; i++) {
        ...
    }
    return 0;
}
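The merge step elided above is essentially a set intersection of the format lists published by the two ends of each link. A minimal sketch, assuming the old public helpers avfilter_make_format_list() and avfilter_merge_formats() behave as their names suggest (the two format lists are made up for the example):

static const enum PixelFormat upstream_fmts[]   = { PIX_FMT_YUV420P, PIX_FMT_YUV422P,
                                                    PIX_FMT_NONE };
static const enum PixelFormat downstream_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_RGB24,
                                                    PIX_FMT_NONE };

AVFilterFormats *a = avfilter_make_format_list(upstream_fmts);    /* source pad      */
AVFilterFormats *b = avfilter_make_format_list(downstream_fmts);  /* destination pad */

/* only PIX_FMT_YUV420P survives; both pads then refer to the merged list */
AVFilterFormats *merged = avfilter_merge_formats(a, b);

After the merging pass, pick_formats() keeps the first remaining entry of each list and stores it in link->format, the format the connected pads will actually exchange. yadif's own query_formats(), called from the loop above, simply declares which pixel formats it accepts: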
/* declare the pixel formats this filter's pads can handle */
static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P,
        PIX_FMT_YUV422P,
        PIX_FMT_YUV444P,
        PIX_FMT_YUV410P,
        PIX_FMT_YUV411P,
        PIX_FMT_GRAY8,
        PIX_FMT_YUVJ420P,
        PIX_FMT_YUVJ422P,
        PIX_FMT_YUVJ444P,
        AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
        PIX_FMT_YUV440P,
        PIX_FMT_YUVJ440P,
        AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
        AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
        AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
        PIX_FMT_NONE
    };

    avfilter_set_common_pixel_formats(ctx,
                                      avfilter_make_format_list(pix_fmts));
    return 0;
}
3. Finally, feeding frames into the filter chain and the per-frame processing calls
int output_packet(AVInputStream *ist, int ist_index,
                  AVOutputStream **ost_table,
                  int nb_ostreams,
                  const AVPacket *pkt,
                  int *errorflag)
{
    /* decode the packet if needed */
    if (ist->decoding_needed) {
        /* decode */
#if CONFIG_AVFILTER
        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            if (start_time == 0 || ist->pts >= start_time) {
                for (i = 0; i < nb_ostreams; i++) {
                    ost = ost_table[i];
                    if (ost->input_video_filter && ost->source_index == ist_index) {
                        if (!picture.sample_aspect_ratio.num)
                            picture.sample_aspect_ratio = ist->st->sample_aspect_ratio;
                        picture.pts = ist->pts;
                        /* push the decoded frame into the buffer source filter */
                        av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
                                                 AV_VSRC_BUF_FLAG_OVERWRITE);
                    }
                }
            }
#endif

#if CONFIG_AVFILTER
        frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
                          !ost->output_video_filter ||
                          avfilter_poll_frame(ost->output_video_filter->inputs[0]);
        while (frame_available) {
            if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                ost->output_video_filter) {
                AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
                /* pull a filtered frame out of the buffer sink filter */
                if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter,
                                                         &ost->picref, 0) < 0)
                    goto cont;
                if (ost->picref) {
                    AVRational tempVar = {1, AV_TIME_BASE};
                    avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
                    //ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, tempVar); // modified by chenrui
                }
            }
#endif
            /* encode */
    ...
    return 0;
fail_decode:
    return -1;
        }
    }
}
First, how a decoded frame is handed to the filter chain:
int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    /* wrap the decoded AVFrame in an AVFilterBufferRef */
    AVFilterBufferRef *picref =
        avfilter_get_video_buffer_ref_from_frame(frame,
                                                 AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src,
                                              picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);
    return ret;
}

int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    /* insert extra filters (e.g. a scaler) automatically if the frame parameters changed */
    c->picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                          picref->video->w, picref->video->h);
    av_image_copy(c->picref->data, c->picref->linesize,
                  picref->data, picref->linesize,
                  picref->format, picref->video->w, picref->video->h);
    avfilter_copy_buffer_ref_props(c->picref, picref);
    return 0;
}

AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link,
                                             int perms, int w, int h)
{
    AVFilterBufferRef *ret = NULL;

    /* let the destination pad allocate the buffer if it wants to (yadif does) */
    if (link->dstpad->get_video_buffer)
        ret = link->dstpad->get_video_buffer(link, perms, w, h);
    if (!ret)
        ret = avfilter_default_get_video_buffer(link, perms, w, h);
    if (ret)
        ret->type = AVMEDIA_TYPE_VIDEO;
    return ret;
}

/* yadif's pad callback: allocate a padded buffer so edge lines can be read */
AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
                                    int w, int h)
{
    AVFilterBufferRef *picref;
    int width  = FFALIGN(w, 32);       /* round the stride up for aligned access */
    int height = FFALIGN(h + 2, 32);   /* leave spare lines above and below      */
    int i;

    picref = avfilter_default_get_video_buffer(link, perms, width, height);
    picref->video->w = w;
    picref->video->h = h;

    /* shift the plane pointers down one line so y-1 is readable at the top edge */
    for (i = 0; i < 3; i++)
        picref->data[i] += picref->linesize[i];

    return picref;
}
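A quick worked example of the padding arithmetic in get_video_buffer() (the 720x576 frame size is hypothetical):

int w = 720, h = 576;                /* hypothetical input size            */
int width  = FFALIGN(w, 32);         /* = 736, stride rounded up to 32     */
int height = FFALIGN(h + 2, 32);     /* = 608, spare lines above and below */
/* after data[i] += linesize[i], lines y-1 and y+1 exist even at the frame edges,
 * which is what the deinterlacer's vertical taps need */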
Finally, the pull side: how frames are requested and processed:
int avfilter_poll_frame(AVFilterLink *link)
{
    int i, min = INT_MAX;

    /* let the source pad answer directly if it implements poll_frame (yadif does) */
    if (link->srcpad->poll_frame)
        return link->srcpad->poll_frame(link);

    /* otherwise recurse upstream and report the smallest count */
    for (i = 0; i < link->src->input_count; i++) {
        int val;
        if (!link->src->inputs[i])
            return -1;
        val = avfilter_poll_frame(link->src->inputs[i]);
        min = FFMIN(min, val);
    }
    return min;
}

/* yadif's poll_frame: it needs two input frames before it can emit output */
int poll_frame(AVFilterLink *link)
{
    YADIFContext *yadif = link->src->priv;
    int ret, val;

    if (yadif->frame_pending)
        return 1;

    val = avfilter_poll_frame(link->src->inputs[0]);

    if (val == 1 && !yadif->next) {
        //FIXME change API to not requre this red tape
        if ((ret = avfilter_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = avfilter_poll_frame(link->src->inputs[0]);
    }
    assert(yadif->next || !val);

    return val * ((yadif->mode & 1) + 1);
}

int avfilter_request_frame(AVFilterLink *link)
{
    if (link->srcpad->request_frame)
        return link->srcpad->request_frame(link);
    else if (link->src->inputs[0])
        return avfilter_request_frame(link->src->inputs[0]);
    else
        return -1;
}

/* the buffer source's request_frame: push the stored frame downstream */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    if (!c->picref) {
        av_log(link->src, AV_LOG_WARNING,
               "request_frame() called with no available frame!\n");
        return AVERROR(EINVAL);
    }

    avfilter_start_frame(link, avfilter_ref_buffer(c->picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(c->picref);
    c->picref = NULL;
    return 0;
}
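request_frame() on the buffer source is where data finally flows downstream, through the start_frame / draw_slice / end_frame trio. Every filter in the chain receives data through the same three pad callbacks; yadif buffers fields there and does its real work per line in filter_line(). As a simpler illustration of this push model, here is a pass-through sketch under the old API. It is not yadif's actual code, and the use of inlink->cur_buf for the stored input reference is an assumption:

#include <libavfilter/avfilter.h>

static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    /* hand a new reference on the incoming buffer to the next filter */
    avfilter_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
}

static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    /* a real filter would process rows y .. y+h-1 here before forwarding them */
    avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
}

static void end_frame(AVFilterLink *inlink)
{
    avfilter_end_frame(inlink->dst->outputs[0]);
    avfilter_unref_buffer(inlink->cur_buf);   /* release the input reference */
}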
That concludes the walkthrough.