Analysis of the FFmpeg 3.3.2 muxing example

1: Analysis of the main function. The code is as follows:

  

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}

2: First, a custom struct OutputStream is defined; one instance represents an audio or a video output stream.

  

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

  st is the video or audio stream that gets created, enc is its encoder context, next_pts is the pts of the next frame to be generated, frame/tmp_frame hold the raw data to encode, and sws_ctx/swr_ctx are the pixel-format and sample-format conversion contexts.

3: The main function first calls av_register_all() to register all codecs and formats.

  It then allocates the output media context: avformat_alloc_output_context2(&oc, NULL, NULL, filename);

  and adds a video track and an audio track: add_stream(&video_st, oc, &video_codec, fmt->video_codec);

4: Analysis of the add_stream function:

  

static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt  = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
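
  The time_base set above deserves a closer look: it is the unit in which the stream's timestamps are expressed. A small worked example (assuming STREAM_FRAME_RATE is 25, as defined in muxing.c) of converting a pts value to seconds with av_q2d():

 /* With a 1/25 time base, each pts tick is one frame, i.e. 40 ms. */
 AVRational tb  = (AVRational){ 1, STREAM_FRAME_RATE }; /* 1/25 */
 int64_t pts    = 75;
 double seconds = pts * av_q2d(tb); /* 75 * 0.04 = 3.0 s */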

5: After the audio and video tracks have been created, the video and audio encoders are opened to get ready for encoding: open_video(oc, video_codec, &video_st, opt);

  

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

Note: static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) allocates a frame with the given pixel format, width, and height.

  tmp_frame: if the encoder pixel format is not YUV420P, this frame holds the YUV420P image that is then converted to the encoder format; otherwise frame is used directly.

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

6: Open the output file:

  

 if (!(fmt->flags & AVFMT_NOFILE)) {
     ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
     if (ret < 0) {
         fprintf(stderr, "Could not open '%s': %s\n", filename,
                 av_err2str(ret));
         return 1;
     }
 }

7: Write the file header:

  

 ret = avformat_write_header(oc, &opt);
 if (ret < 0) {
     fprintf(stderr, "Error occurred when opening output file: %s\n",
             av_err2str(ret));
     return 1;
 }

8: Encode and write the data in a loop:

  

 while (encode_video || encode_audio) {
     /* select the stream to encode */
     if (encode_video &&
         (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                         audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
         encode_video = !write_video_frame(oc, &video_st);
     } else {
         encode_audio = !write_audio_frame(oc, &audio_st);
     }
 }
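
  The loop always encodes the stream whose next timestamp is earlier, which keeps audio and video properly interleaved in the output. av_compare_ts() compares two timestamps expressed in different time bases; a small sketch with hypothetical values:

 /* Video pts 25 in a 1/25 time base is 1.0 s; audio pts 44100 in a
  * 1/44100 time base is also 1.0 s, so the result is 0 and the
  * video branch (<= 0) is taken. */
 int cmp = av_compare_ts(25, (AVRational){ 1, 25 },
                         44100, (AVRational){ 1, 44100 });
 /* cmp < 0: first timestamp is earlier, 0: equal, > 0: later */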

  Analysis of static int write_video_frame(AVFormatContext *oc, OutputStream *ost): it encodes one video frame and sends it to the muxer, returning 1 when encoding is finished and 0 otherwise.

  

static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    c = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
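
  Note that write_video_frame() uses avcodec_encode_video2(), which is already marked deprecated in FFmpeg 3.3. A minimal sketch of the same encode step with the newer send/receive API (assuming the same local variables ret, c, frame, oc and ost; error handling reduced to the essentials):

 ret = avcodec_send_frame(c, frame);   /* a NULL frame flushes the encoder */
 while (ret >= 0) {
     AVPacket pkt = { 0 };
     av_init_packet(&pkt);
     ret = avcodec_receive_packet(c, &pkt);
     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
         break;                        /* needs more input / fully flushed */
     else if (ret < 0)
         exit(1);
     /* av_interleaved_write_frame() inside write_frame() takes ownership
      * of the packet reference */
     write_frame(oc, &c->time_base, ost->st, &pkt);
 }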

  Generate one frame of raw data: frame = get_video_frame(ost);

  

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx,
                  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
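
  The stop condition is again av_compare_ts(): next_pts (in the encoder time base) is compared against STREAM_DURATION (10.0 seconds in muxing.c) expressed in a 1/1 time base, so with a 1/25 time base frame generation stops after:

 /* 10.0 s at 25 frames/s: generation stops once next_pts reaches 250 */
 int64_t last_pts = STREAM_DURATION * STREAM_FRAME_RATE; /* 250 */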

  static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height) fills one frame with synthetic data.

  

static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
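
  In YUV420P the Cb and Cr planes are subsampled by two both horizontally and vertically, which is why the chroma loops only run to height/2 and width/2. For the 352x288 frames configured in add_stream() that works out to:

 /* plane sizes of a 352x288 YUV420P frame (ignoring linesize padding) */
 int luma_size   = 352 * 288;          /* 101376 bytes -> pict->data[0] */
 int chroma_size = (352/2) * (288/2);  /* 25344 bytes each -> data[1], data[2] */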

  Once encoding has produced a packet, the helper function write_frame(oc, &c->time_base, ost->st, &pkt) writes that packet to the output context.

  

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}
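
  av_packet_rescale_ts() converts the packet's pts, dts and duration from the encoder time base (e.g. 1/25 for video) to the time base the muxer chose for the stream (typically 1/90000 for the MPEG-PS muxer used as the fallback here). A simplified, hand-written sketch of the same conversion using av_rescale_q(), given as an illustration rather than the library implementation:

 /* Convert a packet's timestamps from src_tb to dst_tb. */
 static void rescale_packet_ts_sketch(AVPacket *pkt,
                                      AVRational src_tb, AVRational dst_tb)
 {
     if (pkt->pts != AV_NOPTS_VALUE)
         pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
     if (pkt->dts != AV_NOPTS_VALUE)
         pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
     if (pkt->duration > 0)
         pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
 }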

9: Write the trailer. The trailer must be written before the codec contexts are closed:

  

 av_write_trailer(oc);

10: Finally, close each codec and the output file, and free the context:

  

 /* Close each codec. */
 if (have_video)
     close_stream(oc, &video_st);
 if (have_audio)
     close_stream(oc, &audio_st);

 if (!(fmt->flags & AVFMT_NOFILE))
     /* Close the output file. */
     avio_closep(&oc->pb);

 /* free the stream */
 avformat_free_context(oc);

11: The implementation of close_stream is as follows:

  

 static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
