1、avformat_open_input

This function is declared at libavformat/avformat.h:2093.

int avformat_open_input(AVFormatContext **ps, const char *url,
const AVInputFormat *fmt, AVDictionary **options);

The implementation is at libavformat/demux.c:207. This function is mainly responsible for selecting the IO and demuxing components, and it contains a few key steps (a short usage sketch follows the list):

1.1、init_input, which probes for a suitable IO component and demuxer

1.2、calling the AVInputFormat's read_header method to try to read the stream-related information
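
Before diving into the source, here is a minimal usage sketch of the public API (not taken from the post; the helper name open_demuxer and the "probesize" value are my own choices):

#include <libavformat/avformat.h>

/* Minimal usage sketch: open an input, optionally forcing a demuxer and
 * passing probing options. Error handling is kept short on purpose. */
static int open_demuxer(const char *url)
{
    AVFormatContext *fmt_ctx = NULL;       /* avformat_open_input allocates it when NULL */
    AVDictionary *opts = NULL;
    const AVInputFormat *forced = NULL;    /* e.g. av_find_input_format("mpegts") to skip probing */
    int ret;

    av_dict_set(&opts, "probesize", "1048576", 0);   /* cap the probe buffer at 1 MiB */

    ret = avformat_open_input(&fmt_ctx, url, forced, &opts);
    av_dict_free(&opts);                   /* options not consumed by the demuxer remain here */
    if (ret < 0)
        return ret;

    av_log(NULL, AV_LOG_INFO, "format: %s, probe score: %d\n",
           fmt_ctx->iformat->name, fmt_ctx->probe_score);

    avformat_close_input(&fmt_ctx);
    return 0;
}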

int avformat_open_input(AVFormatContext **ps, const char *filename,
const AVInputFormat *fmt, AVDictionary **options)
{
AVFormatContext *s = *ps;
FFFormatContext *si;
AVDictionary *tmp = NULL;
ID3v2ExtraMeta *id3v2_extra_meta = NULL;
int ret = 0;

// If the AVFormatContext passed in is NULL, allocate one internally
if (!s && !(s = avformat_alloc_context()))
return AVERROR(ENOMEM);
// Cast to obtain an FFFormatContext pointer, which holds the internal state of the AVFormatContext
si = ffformatcontext(s);
if (!s->av_class) {
av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
return AVERROR(EINVAL);
}
// If an AVInputFormat was specified, use it directly
if (fmt)
s->iformat = fmt;

// If options were passed in, copy them
if (options)
av_dict_copy(&tmp, *options, 0);

// pb is an AVIOContext; a non-NULL pb means custom IO is being used
if (s->pb) // must be before any goto fail
s->flags |= AVFMT_FLAG_CUSTOM_IO;

// Apply the options to the AVFormatContext
if ((ret = av_opt_set_dict(s, &tmp)) < 0)
goto fail;
// Copy the url into the AVFormatContext
if (!(s->url = av_strdup(filename ? filename : ""))) {
ret = AVERROR(ENOMEM);
goto fail;
}
//////////////////////////////////////////////////////////////////////////
// 1.1、Call init_input to do the initialization; it mainly does two things: selecting the AVIOContext and selecting the AVInputFormat
if ((ret = init_input(s, filename, &tmp)) < 0)
goto fail;
// The probe score is recorded in the AVFormatContext's probe_score field
s->probe_score = ret;
//////////////////////////////////////////////////////////////////////////

// Copy the protocol whitelist into the AVFormatContext's protocol_whitelist
if (!s->protocol_whitelist && s->pb && s->pb->protocol_whitelist) {
s->protocol_whitelist = av_strdup(s->pb->protocol_whitelist);
if (!s->protocol_whitelist) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
// Copy the protocol blacklist into the AVFormatContext's protocol_blacklist
if (!s->protocol_blacklist && s->pb && s->pb->protocol_blacklist) {
s->protocol_blacklist = av_strdup(s->pb->protocol_blacklist);
if (!s->protocol_blacklist) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
// The probed format must be on the format whitelist
if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
av_log(s, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", s->format_whitelist);
ret = AVERROR(EINVAL);
goto fail;
}

// Skip the initial bytes in the AVIOContext's internal buffer
avio_skip(s->pb, s->skip_initial_bytes);

/* Check filename in case an image number is expected. */
if (s->iformat->flags & AVFMT_NEEDNUMBER) {
if (!av_filename_number_test(filename)) {
ret = AVERROR(EINVAL);
goto fail;
}
}

// Initialize the file duration and start time to AV_NOPTS_VALUE (0x8000000000000000)
s->duration = s->start_time = AV_NOPTS_VALUE;

// If the AVInputFormat has private data, allocate memory for the AVFormatContext's priv_data
/* Allocate private data. */
if (s->iformat->priv_data_size > 0) {
if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (s->iformat->priv_class) {
// Point the priv_data's AVClass at the AVInputFormat's priv_class
*(const AVClass **) s->priv_data = s->iformat->priv_class;
// Initialize the remaining members of the AVFormatContext's private data
av_opt_set_defaults(s->priv_data);
// Apply the user options to the private data
if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
goto fail;
}
}

// If an AVIOContext was created, read the ID3 information
/* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
if (s->pb)
ff_id3v2_read_dict(s->pb, &si->id3v2_meta, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

// If the AVInputFormat's read_header is not NULL, call it to read the contents of the header
if (s->iformat->read_header)
///////////////////////////////////////////////////
// 1.2、Call read_header to read the stream-related information
if ((ret = s->iformat->read_header(s)) < 0) {
if (s->iformat->flags_internal & FF_FMT_INIT_CLEANUP)
goto close;
goto fail;
}
///////////////////////////////////////////////////

// Handle metadata and ID3 information
if (!s->metadata) {
s->metadata = si->id3v2_meta;
si->id3v2_meta = NULL;
} else if (si->id3v2_meta) {
av_log(s, AV_LOG_WARNING, "Discarding ID3 tags because more suitable tags were found.\n");
av_dict_free(&si->id3v2_meta);
}
if (id3v2_extra_meta) {
if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
!strcmp(s->iformat->name, "tta") || !strcmp(s->iformat->name, "wav")) {
if ((ret = ff_id3v2_parse_apic(s, id3v2_extra_meta)) < 0)
goto close;
if ((ret = ff_id3v2_parse_chapters(s, id3v2_extra_meta)) < 0)
goto close;
if ((ret = ff_id3v2_parse_priv(s, id3v2_extra_meta)) < 0)
goto close;
} else
av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
}

// Handle attached pictures (cover art)
if ((ret = avformat_queue_attached_pictures(s)) < 0)
goto close;

// If the AVIOContext is not NULL and the FFFormatContext has not recorded a data offset yet, record the AVIOContext's current position as the data offset
if (s->pb && !si->data_offset)
si->data_offset = avio_tell(s->pb);
si->raw_packet_buffer_size = 0;
update_stream_avctx(s);
if (options) {
av_dict_free(options);
*options = tmp;
}
*ps = s;
return 0;
close:
if (s->iformat->read_close)
s->iformat->read_close(s);
fail:
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
av_dict_free(&tmp);
if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
avio_closep(&s->pb);
avformat_free_context(s);
*ps = NULL;
return ret;
}

1.1、init_input

This function is implemented at libavformat/demux.c:150 and is called from avformat_open_input to find a suitable IO component and a suitable demuxer. Normally neither an AVIOContext nor an AVInputFormat is specified, so both have to be probed. There are two important calls here (a custom-IO sketch follows the list):

1.1.1、io_open, which selects and opens the IO component

1.1.2、av_probe_input_buffer2, which probes for a suitable demuxer
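
The custom-IO branch mentioned above can be exercised from user code with the public avio_alloc_context API. A hedged sketch (the struct mem_input type, the mem_read callback and the open_with_custom_io helper are hypothetical names of mine; cleanup on error paths is omitted for brevity):

#include <string.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>

struct mem_input { const uint8_t *data; size_t size, pos; };

/* Read callback that serves data out of a memory buffer. */
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_input *in = opaque;
    size_t left = in->size - in->pos;
    int n = buf_size < (int)left ? buf_size : (int)left;
    if (n <= 0)
        return AVERROR_EOF;
    memcpy(buf, in->data + in->pos, n);
    in->pos += n;
    return n;
}

static int open_with_custom_io(struct mem_input *in)
{
    const int buf_size = 4096;
    uint8_t *avio_buf = av_malloc(buf_size);
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    if (!avio_buf || !fmt_ctx)
        return AVERROR(ENOMEM);

    /* Setting fmt_ctx->pb before avformat_open_input() makes init_input()
     * take the AVFMT_FLAG_CUSTOM_IO branch: only the demuxer is probed. */
    fmt_ctx->pb = avio_alloc_context(avio_buf, buf_size, 0, in, mem_read, NULL, NULL);
    if (!fmt_ctx->pb)
        return AVERROR(ENOMEM);

    return avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
}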

static int init_input(AVFormatContext *s, const char *filename,
AVDictionary **options)
{
int ret;
// Create an AVProbeData
AVProbeData pd = { filename, NULL, 0 };
int score = AVPROBE_SCORE_RETRY;

// By default no custom IO is used, i.e. pb is NULL
if (s->pb) {
// Flag the AVFormatContext to indicate that the IO was supplied by the user
s->flags |= AVFMT_FLAG_CUSTOM_IO;
// If the AVInputFormat is NULL, probe for it
if (!s->iformat)
return av_probe_input_buffer2(s->pb, &s->iformat, filename,
s, 0, s->format_probesize);
else if (s->iformat->flags & AVFMT_NOFILE)
av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
"will be ignored with AVFMT_NOFILE format.\n");
return 0;
}

// If the AVInputFormat is set and has the NOFILE flag, or it is NULL and probing found a suitable demuxer, return the score directly
if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
(!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
return score;

// 1.1.1、Otherwise, first call io_open to find and open the IO component; its default implementation is in libavformat/options.c, and pb is an AVIOContext
if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
return ret;
if (s->iformat)
return 0;
// 1.1.2、Call av_probe_input_buffer2 to probe for a suitable demuxer
return av_probe_input_buffer2(s->pb, &s->iformat, filename,
s, 0, s->format_probesize);
}

1.1.1、io_open

The default implementation of this callback, io_open_default, lives in libavformat/options.c. It creates and initializes the AVIOContext held by the AVFormatContext; the creation itself is simple, it just calls ffio_open_whitelist.
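
Since io_open is a public field of AVFormatContext, a caller can wrap it. A small sketch under that assumption (the names default_io_open and logging_io_open are mine; storing the old pointer in a global is just for brevity):

#include <libavformat/avformat.h>

static int (*default_io_open)(AVFormatContext *s, AVIOContext **pb, const char *url,
                              int flags, AVDictionary **options);

/* Log every URL the demuxer opens (HLS, for example, opens many segment URLs),
 * then delegate to the default implementation. */
static int logging_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
                           int flags, AVDictionary **options)
{
    av_log(s, AV_LOG_INFO, "io_open: %s\n", url);
    return default_io_open(s, pb, url, flags, options);
}

/* usage:
 *   AVFormatContext *ctx = avformat_alloc_context();
 *   default_io_open = ctx->io_open;   // io_open_default, set by avformat_alloc_context()
 *   ctx->io_open    = logging_io_open;
 *   avformat_open_input(&ctx, url, NULL, NULL);
 */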

static int io_open_default(AVFormatContext *s, AVIOContext **pb,
const char *url, int flags, AVDictionary **options)
{
int loglevel;
if (!strcmp(url, s->url) ||
s->iformat && !strcmp(s->iformat->name, "image2") ||
s->oformat && !strcmp(s->oformat->name, "image2")
) {
loglevel = AV_LOG_DEBUG;
} else
loglevel = AV_LOG_INFO;
av_log(s, loglevel, "Opening \'%s\' for %s\n", url, flags & AVIO_FLAG_WRITE ? "writing" : "reading");
// The url here is the filename passed down from avformat_open_input
return ffio_open_whitelist(pb, url, flags, &s->interrupt_callback, options, s->protocol_whitelist, s->protocol_blacklist);
}

ffio_open_whitelist is declared at libavformat/avio_internal.h:225 and implemented at libavformat/aviobuf.c:1225. It does two things:

1.1.1.1、ffurl_open_whitelist, which creates a URLContext object; the URLContext wraps the actual IO implementation

1.1.1.2、ffio_fdopen, which uses the URLContext to create an IO context, the AVIOContext
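
As an aside, the whitelist and blacklist consulted here can be set from user code through the documented "protocol_whitelist"/"protocol_blacklist" options. A small sketch (the helper name open_with_whitelist and the whitelist contents are my own choices):

#include <libavformat/avformat.h>

static int open_with_whitelist(const char *url)
{
    AVFormatContext *ctx = NULL;
    AVDictionary *opts = NULL;
    /* Only these protocols may be used for this input and anything it opens. */
    av_dict_set(&opts, "protocol_whitelist", "file,crypto,data,http,https,tcp,tls", 0);
    int ret = avformat_open_input(&ctx, url, NULL, &opts);
    av_dict_free(&opts);
    if (ret >= 0)
        avformat_close_input(&ctx);
    return ret;
}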

int ffio_open_whitelist(AVIOContext **s, const char *filename, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options,
const char *whitelist, const char *blacklist
)
{
URLContext *h;
int err;
*s = NULL;
// 1.1.1.1、Call this to get back a URLContext
err = ffurl_open_whitelist(&h, filename, flags, int_cb, options, whitelist, blacklist, NULL);
if (err < 0)
return err;
// 1.1.1.2、Use the URLContext to create and initialize an AVIOContext
err = ffio_fdopen(s, h);
if (err < 0) {
ffurl_close(h);
return err;
}
return 0;
}

1.1.1.1、ffurl_open_whitelist

The implementation is at libavformat/avio.c:306; it creates a URLContext object in two steps:

1.1.1.1.1、ffurl_alloc, which creates and initializes a URLContext and picks a suitable IO protocol

1.1.1.1.2、ffurl_connect, which opens the url

int ffurl_open_whitelist(URLContext **puc, const char *filename, int flags,
const AVIOInterruptCB *int_cb, AVDictionary **options,
const char *whitelist, const char* blacklist,
URLContext *parent)
{
AVDictionary *tmp_opts = NULL;
AVDictionaryEntry *e;
// Create and initialize a URLContext object
int ret = ffurl_alloc(puc, filename, flags, int_cb);
if (ret < 0)
return ret;
if (parent) {
ret = av_opt_copy(*puc, parent);
if (ret < 0)
goto fail;
}
if (options &&
(ret = av_opt_set_dict(*puc, options)) < 0)
goto fail;
if (options && (*puc)->prot->priv_data_class &&
(ret = av_opt_set_dict((*puc)->priv_data, options)) < 0)
goto fail;
if (!options)
options = &tmp_opts;
// ...... (some code omitted)

// Call ffurl_connect to open the url
ret = ffurl_connect(*puc, options);
if (!ret)
return 0;
fail:
ffurl_closep(puc);
return ret;
}
1.1.1.1.1、ffurl_alloc

The implementation is at libavformat/avio.c:293. It first calls url_find_protocol, which picks a suitable URLProtocol based on the filename (this part is not split into its own subsection); once a suitable URLProtocol is found, url_alloc_for_protocol is called to create a URLContext for that protocol.

int ffurl_alloc(URLContext **puc, const char *filename, int flags,
const AVIOInterruptCB *int_cb)
{
const URLProtocol *p = NULL;
// Find a suitable protocol
p = url_find_protocol(filename);
// Use the protocol that was found to create a URLContext
if (p)
return url_alloc_for_protocol(puc, p, filename, flags, int_cb);
*puc = NULL;
return AVERROR_PROTOCOL_NOT_FOUND;
}

url_find_protocol is implemented at libavformat/avio.c:251. It mainly decides which URLProtocol to use from the filename's scheme prefix; the full list of protocols can be obtained through ffurl_get_protocols.

static const struct URLProtocol *url_find_protocol(const char *filename)
{
const URLProtocol **protocols;
char proto_str[128], proto_nested[128], *ptr;
// strspn is interesting here: it returns the index of the first character in filename that is not in URL_SCHEME_CHARS, which in effect locates the ':'
size_t proto_len = strspn(filename, URL_SCHEME_CHARS);
int i;

// If there is no ':' right after the scheme characters (the nested "subfile," form is special-cased), or the name is a DOS-style path, fall back to the file protocol
if (filename[proto_len] != ':' &&
(strncmp(filename, "subfile,", 8) || !strchr(filename + proto_len + 1, ':')) ||
is_dos_path(filename))
strcpy(proto_str, "file");
else
av_strlcpy(proto_str, filename,
FFMIN(proto_len + 1, sizeof(proto_str)));
av_strlcpy(proto_nested, proto_str, sizeof(proto_nested));
if ((ptr = strchr(proto_nested, '+')))
*ptr = '\0';

// Get all protocols
protocols = ffurl_get_protocols(NULL, NULL);
if (!protocols)
return NULL;
// Iterate over the protocol list and compare protocol names
for (i = 0; protocols[i]; i++) {
const URLProtocol *up = protocols[i];
if (!strcmp(proto_str, up->name)) {
av_freep(&protocols);
return up;
}
if (up->flags & URL_PROTOCOL_FLAG_NESTED_SCHEME &&
!strcmp(proto_nested, up->name)) {
av_freep(&protocols);
return up;
}
}
// Free the protocols array
av_freep(&protocols);
if (av_strstart(filename, "https:", NULL) || av_strstart(filename, "tls:", NULL))
av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile FFmpeg with "
"openssl, gnutls or securetransport enabled.\n"); return NULL;
}

ffurl_get_protocols is implemented in libavformat/protocols.c. The addresses of all URLProtocols are collected in a pointer array named url_protocols.

const URLProtocol **ffurl_get_protocols(const char *whitelist,
const char *blacklist)
{
const URLProtocol **ret;
int i, ret_idx = 0;
// Allocate a block of memory
ret = av_calloc(FF_ARRAY_ELEMS(url_protocols), sizeof(*ret));
if (!ret)
return NULL;
for (i = 0; url_protocols[i]; i++) {
const URLProtocol *up = url_protocols[i];
if (whitelist && *whitelist && !av_match_name(up->name, whitelist))
continue;
if (blacklist && *blacklist && av_match_name(up->name, blacklist))
continue;
// Copy the addresses from url_protocols into the returned array; the copy exists mainly so the whitelist/blacklist filtering can be applied
ret[ret_idx++] = up;
}
return ret;
}
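
ffurl_get_protocols is internal, but the same list can be enumerated from user code with the public avio_enum_protocols; a quick sketch:

#include <stdio.h>
#include <libavformat/avio.h>

int main(void)
{
    void *opaque = NULL;
    const char *name;
    /* Second argument selects direction: 0 = input protocols, 1 = output protocols. */
    while ((name = avio_enum_protocols(&opaque, 0)))
        printf("%s\n", name);
    return 0;
}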

url_protocols is defined in libavformat/protocol_list.c; it is an array of pointers that holds the address of each protocol.

static const URLProtocol * const url_protocols[] = {
&ff_async_protocol,
&ff_cache_protocol,
&ff_concat_protocol,
&ff_concatf_protocol,
&ff_crypto_protocol,
&ff_data_protocol,
&ff_ffrtmphttp_protocol,
&ff_file_protocol,
&ff_ftp_protocol,
&ff_gopher_protocol,
&ff_hls_protocol,
&ff_http_protocol,
&ff_httpproxy_protocol,
&ff_icecast_protocol,
&ff_mmsh_protocol,
&ff_mmst_protocol,
&ff_md5_protocol,
&ff_pipe_protocol,
&ff_prompeg_protocol,
&ff_rtmp_protocol,
&ff_rtmpt_protocol,
&ff_rtp_protocol,
&ff_srtp_protocol,
&ff_subfile_protocol,
&ff_tee_protocol,
&ff_tcp_protocol,
&ff_udp_protocol,
&ff_udplite_protocol,
&ff_unix_protocol,
NULL };

Below, ff_hls_protocol and ff_file_protocol are used as examples.

The URLProtocol struct contains many function pointers, which is how polymorphism is achieved: each protocol assigns its own functions to these pointers. URLProtocol is where the actual file or network reads and writes are implemented.

ff_hls_protocol is defined at libavformat/hlsproto.c:311

const URLProtocol ff_hls_protocol = {
.name = "hls",
.url_open = hls_open,
.url_read = hls_read,
.url_close = hls_close,
.flags = URL_PROTOCOL_FLAG_NESTED_SCHEME,
.priv_data_size = sizeof(HLSContext),
};

ff_file_protocol is defined at libavformat/file.c:357

const URLProtocol ff_file_protocol = {
.name = "file",
.url_open = file_open,
.url_read = file_read,
.url_write = file_write,
.url_seek = file_seek,
.url_close = file_close,
.url_get_file_handle = file_get_handle,
.url_check = file_check,
.url_delete = file_delete,
.url_move = file_move,
.priv_data_size = sizeof(FileContext),
.priv_data_class = &file_class,
.url_open_dir = file_open_dir,
.url_read_dir = file_read_dir,
.url_close_dir = file_close_dir,
.default_whitelist = "file,crypto,data"
};

Back in ffurl_alloc: once a suitable protocol has been found, url_alloc_for_protocol is called to create the URLProtocol's context, a URLContext.

url_alloc_for_protocol is implemented at libavformat/avio.c:74

static int url_alloc_for_protocol(URLContext **puc, const URLProtocol *up,
const char *filename, int flags,
const AVIOInterruptCB *int_cb)
{
URLContext *uc;
int err;
#if CONFIG_NETWORK
if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init())
return AVERROR(EIO);
#endif
if ((flags & AVIO_FLAG_READ) && !up->url_read) {
av_log(NULL, AV_LOG_ERROR,
"Impossible to open the '%s' protocol for reading\n", up->name);
return AVERROR(EIO);
}
if ((flags & AVIO_FLAG_WRITE) && !up->url_write) {
av_log(NULL, AV_LOG_ERROR,
"Impossible to open the '%s' protocol for writing\n", up->name);
return AVERROR(EIO);
}
// Allocate space for a URLContext object: the size of the URLContext struct plus the length of the filename string
// (URLContext's filename member is a char pointer that will point into this extra space)
uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1);
if (!uc) {
err = AVERROR(ENOMEM);
goto fail;
}
// Initialize the URLContext
uc->av_class = &ffurl_context_class;
uc->filename = (char *)&uc[1]; // The string lives right after the struct; point filename at that address
strcpy(uc->filename, filename);
uc->prot = up;     // Save the URLProtocol's address in the prot pointer
uc->flags = flags;
uc->is_streamed = 0; /* default = not streamed */
uc->max_packet_size = 0; /* default: stream file */
if (up->priv_data_size) {     // Whether there is private data depends on the specific URLProtocol
// priv_data holds protocol-specific data, e.g. the fd obtained when a local file is opened
uc->priv_data = av_mallocz(up->priv_data_size); // Allocate space for priv_data; its size is defined by the protocol
if (!uc->priv_data) {
err = AVERROR(ENOMEM);
goto fail;
}
if (up->priv_data_class) {
char *start;
*(const AVClass **)uc->priv_data = up->priv_data_class;
av_opt_set_defaults(uc->priv_data);
if (av_strstart(uc->filename, up->name, (const char**)&start) && *start == ',') {
int ret= 0;
char *p= start;
char sep= *++p;
char *key, *val;
p++;
if (strcmp(up->name, "subfile"))
ret = AVERROR(EINVAL);
while (ret >= 0 && (key = strchr(p, sep)) && p < key && (val = strchr(key + 1, sep))) {
*val= *key= 0;
if (strcmp(p, "start") && strcmp(p, "end")) {
ret = AVERROR_OPTION_NOT_FOUND;
} else
ret= av_opt_set(uc->priv_data, p, key+1, 0);
if (ret == AVERROR_OPTION_NOT_FOUND)
av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
*val= *key= sep;
p= val+1;
}
if(ret<0 || p!=key){
av_log(uc, AV_LOG_ERROR, "Error parsing options string %s\n", start);
av_freep(&uc->priv_data);
av_freep(&uc);
err = AVERROR(EINVAL);
goto fail;
}
memmove(start, key+1, strlen(key));
}
}
}
if (int_cb)
uc->interrupt_callback = *int_cb;
// Return the address of the newly created URLContext
*puc = uc;
return 0;
fail:
*puc = NULL;
if (uc)
av_freep(&uc->priv_data);
av_freep(&uc);
#if CONFIG_NETWORK
if (up->flags & URL_PROTOCOL_FLAG_NETWORK)
ff_network_close();
#endif
return err;
}
1.1.1.1.2、ffurl_connect

Back in ffurl_open_whitelist: after ffurl_alloc has produced the URLContext and URLProtocol, ffurl_connect is called to open the url.

ffurl_connect is implemented at libavformat/avio.c:166

Opening the url is simple: just call the url_open method of the URLProtocol stored in the URLContext; if it succeeds, the URLContext's is_connected is set to 1.

int ffurl_connect(URLContext *uc, AVDictionary **options)
{
int err;
AVDictionary *tmp_opts = NULL;
AVDictionaryEntry *e;
if (!options)
options = &tmp_opts;
// ...... (some code omitted)

// Call url_open on the URLContext's prot member: url_open2 is preferred if it exists, otherwise url_open. These are defined in URLProtocol, so every protocol has its own implementation
err =
uc->prot->url_open2 ? uc->prot->url_open2(uc,
uc->filename,
uc->flags,
options) :
uc->prot->url_open(uc, uc->filename, uc->flags);
av_dict_set(options, "protocol_whitelist", NULL, 0);
av_dict_set(options, "protocol_blacklist", NULL, 0);
if (err)
return err;
// Set the connected flag in the URLContext to 1
uc->is_connected = 1;
/* We must be careful here as ffurl_seek() could be slow,
* for example for http */
if ((uc->flags & AVIO_FLAG_WRITE) || !strcmp(uc->prot->name, "file"))
if (!uc->is_streamed && ffurl_seek(uc, 0, SEEK_SET) < 0)
uc->is_streamed = 1;
return 0;
}

Below, ff_file_protocol's file_open is used as an example; this is the protocol that opens local files.

It is implemented at libavformat/file.c:208

static int file_open(URLContext *h, const char *filename, int flags)
{
// URLContext has a priv_data member that stores protocol-specific data; for ff_file_protocol it is a FileContext
FileContext *c = h->priv_data;
int access;
int fd;
struct stat st;
av_strstart(filename, "file:", &filename);
if (flags & AVIO_FLAG_WRITE && flags & AVIO_FLAG_READ) {
access = O_CREAT | O_RDWR;
if (c->trunc)
access |= O_TRUNC;
} else if (flags & AVIO_FLAG_WRITE) {
access = O_CREAT | O_WRONLY;
if (c->trunc)
access |= O_TRUNC;
} else {
access = O_RDONLY;
}
#ifdef O_BINARY
access |= O_BINARY;
#endif
// Call the system open() (via avpriv_open) to open the file and get a file descriptor
fd = avpriv_open(filename, access, 0666);
if (fd == -1)
return AVERROR(errno);
// Save the file descriptor in the FileContext
c->fd = fd;

// Initialize the URLContext's is_streamed state: fstat the fd and check whether it is a FIFO, i.e. whether this is a stream rather than a regular file
h->is_streamed = !fstat(fd, &st) && S_ISFIFO(st.st_mode);
/* Buffer writes more than the default 32k to improve throughput especially
* with networked file systems */
if (!h->is_streamed && flags & AVIO_FLAG_WRITE)
h->min_packet_size = h->max_packet_size = 262144;
if (c->seekable >= 0)
h->is_streamed = !c->seekable;
return 0;
}

1.1.1.2、ffio_fdopen

This function is implemented at libavformat/aviobuf.c:956

After ffurl_open_whitelist has finished (a URLContext has been created, a suitable URLProtocol selected, and url_open called), ffio_fdopen is called to build a higher-level context, the AVIOContext, on top of the URLContext. The AVIOContext's read_pause and read_seek are taken from the URLProtocol's methods.

int ffio_fdopen(AVIOContext **s, URLContext *h)
{
uint8_t *buffer = NULL;
int buffer_size, max_packet_size;
max_packet_size = h->max_packet_size;
if (max_packet_size) {
buffer_size = max_packet_size; /* no need to bufferize more than one packet */
} else {
buffer_size = IO_BUFFER_SIZE;
}
if (!(h->flags & AVIO_FLAG_WRITE) && h->is_streamed) {
if (buffer_size > INT_MAX/2)
return AVERROR(EINVAL);
buffer_size *= 2;
}
// Allocate the buffer
buffer = av_malloc(buffer_size);
if (!buffer)
return AVERROR(ENOMEM);
// Call avio_alloc_context to create the FFIOContext object
*s = avio_alloc_context(buffer, buffer_size, h->flags & AVIO_FLAG_WRITE, h,
(int (*)(void *, uint8_t *, int)) ffurl_read,
(int (*)(void *, uint8_t *, int)) ffurl_write,
(int64_t (*)(void *, int64_t, int))ffurl_seek);
if (!*s) {
av_freep(&buffer);
return AVERROR(ENOMEM);
} // ......
// Initialize the AVIOContext members
(*s)->direct = h->flags & AVIO_FLAG_DIRECT;
(*s)->seekable = h->is_streamed ? 0 : AVIO_SEEKABLE_NORMAL;
(*s)->max_packet_size = max_packet_size;
(*s)->min_packet_size = h->min_packet_size;
if(h->prot) {
// Assign read_pause and read_seek in the AVIOContext (from url_read_pause / url_read_seek)
(*s)->read_pause = (int (*)(void *, int))h->prot->url_read_pause;
(*s)->read_seek =
(int64_t (*)(void *, int, int64_t, int))h->prot->url_read_seek;
if (h->prot->url_read_seek)
(*s)->seekable |= AVIO_SEEKABLE_TIME;
}
// Assign the FFIOContext's member function
((FFIOContext*)(*s))->short_seek_get = (int (*)(void *))ffurl_get_short_seek;
(*s)->av_class = &ff_avio_class;
return 0;
}

avio_alloc_context creates and initializes an FFIOContext object, which contains an AVIOContext as its public part.

libavformat/aviobuf.c:135:

AVIOContext *avio_alloc_context(
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
FFIOContext *s = av_malloc(sizeof(*s));
if (!s)
return NULL;
ffio_init_context(s, buffer, buffer_size, write_flag, opaque,
read_packet, write_packet, seek);
return &s->pub;
}

ffio_init_context is implemented at libavformat/aviobuf.c:81 and initializes the AVIOContext; the buffer held by this struct stores the data that has been read.

The write_packet, read_packet and seek callbacks correspond to ffurl_read, ffurl_write and ffurl_seek in libavformat/avio.c:401, which in turn call the URLProtocol's methods. As seen above, the AVIOContext also has read_pause and read_seek, which likewise point at URLProtocol methods; these are presumably used for playback control, implementing commands such as pause and seek.

void ffio_init_context(FFIOContext *ctx,
unsigned char *buffer,
int buffer_size,
int write_flag,
void *opaque,
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
AVIOContext *const s = &ctx->pub;
memset(ctx, 0, sizeof(*ctx));
// Buffer used to store the data that has been read
s->buffer = buffer;
// Maximum size of the buffer
ctx->orig_buffer_size =
s->buffer_size = buffer_size;
// Read/write pointer into the buffer
s->buf_ptr = buffer;
// Highest position that has been written so far
s->buf_ptr_max = buffer;
// The opaque pointer (here the URLContext)
s->opaque = opaque;
s->direct = 0;
url_resetbuf(s, write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);
// Initialize the write, read and seek callbacks; here they were passed in as ffurl_read, ffurl_write and ffurl_seek
s->write_packet = write_packet;
s->read_packet = read_packet;
s->seek = seek;
// Initialize the remaining AVIOContext members
s->pos = 0;
s->eof_reached = 0;
s->error = 0;
s->seekable = seek ? AVIO_SEEKABLE_NORMAL : 0;
s->min_packet_size = 0;
s->max_packet_size = 0;
s->update_checksum = NULL;
ctx->short_seek_threshold = SHORT_SEEK_THRESHOLD;
if (!read_packet && !write_flag) {
s->pos = buffer_size;
s->buf_end = s->buffer + buffer_size;
}
s->read_pause = NULL;
s->read_seek = NULL;
s->write_data_type = NULL;
s->ignore_boundary_point = 0;
ctx->current_type = AVIO_DATA_MARKER_UNKNOWN;
ctx->last_time = AV_NOPTS_VALUE;
ctx->short_seek_get = NULL;
#if FF_API_AVIOCONTEXT_WRITTEN
FF_DISABLE_DEPRECATION_WARNINGS
s->written = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}

1.1.2、av_probe_input_buffer2

Back in init_input: after io_open has produced the FFIOContext / AVIOContext, av_probe_input_buffer2 is called to do the probing.

It is implemented at libavformat/format.c:225; from here on it is the demuxing component, i.e. the AVInputFormat, that is being probed.

The function breaks down into three steps (a probing sketch follows the list):

1.1.2.1、calling avio_read to read data. Why look at this function? Because the data that is read may be reused later, so it is worth seeing how it is stored.

1.1.2.2、calling av_probe_input_format2 to probe the input format; once a suitable demuxer is returned, reading stops.

1.1.2.3、calling ffio_rewind_with_probe_data to reuse the data that has already been read
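
The probing step can also be driven directly from user code through the public av_probe_input_format, which takes the same AVProbeData used internally. A sketch (probe_memory and its data/size inputs are assumptions of mine):

#include <string.h>
#include <libavformat/avformat.h>

/* Probe a format straight from a memory buffer, the way the loop below does internally. */
static const AVInputFormat *probe_memory(const uint8_t *data, int size)
{
    AVProbeData pd = { 0 };
    uint8_t *buf = av_mallocz(size + AVPROBE_PADDING_SIZE); /* padding must be zeroed */
    if (!buf)
        return NULL;
    memcpy(buf, data, size);

    pd.filename = "";        /* no filename hint, rely on the content only */
    pd.buf      = buf;
    pd.buf_size = size;

    const AVInputFormat *fmt = av_probe_input_format(&pd, 1 /* is_opened */);
    av_free(buf);
    return fmt;
}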

int av_probe_input_buffer2(AVIOContext *pb, const AVInputFormat **fmt,
const char *filename, void *logctx,
unsigned int offset, unsigned int max_probe_size)
{
AVProbeData pd = { filename ? filename : "" };
uint8_t *buf = NULL;
int ret = 0, probe_size, buf_offset = 0;
int score = 0;
int ret2;

// Check whether a maximum probe size was set; if not, use PROBE_BUF_MAX (1 << 20)
if (!max_probe_size)
max_probe_size = PROBE_BUF_MAX;
else if (max_probe_size < PROBE_BUF_MIN) { // The probe size cannot be smaller than 2048
av_log(logctx, AV_LOG_ERROR,
"Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
return AVERROR(EINVAL);
}

// Check that the initial probe offset is valid
if (offset >= max_probe_size)
return AVERROR(EINVAL);
if (pb->av_class) {
uint8_t *mime_type_opt = NULL;
char *semi;
// Look up the mime_type option from the AVIOContext
av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
pd.mime_type = (const char *)mime_type_opt;
// The mime type may contain several values separated by ';'; keep only the first. If there is no mime type, semi is NULL
semi = pd.mime_type ? strchr(pd.mime_type, ';') : NULL;
if (semi) {
*semi = '\0';
}
}

// Start with a probe_size of at least 2048 and keep going until the limit is reached or an AVInputFormat is found
for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
// probe_size x 2
     probe_size = FFMIN(probe_size << 1,
FFMAX(max_probe_size, probe_size + 1))) {
// score starts at AVPROBE_SCORE_RETRY (25) except in the last iteration
score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
/* Read probe data. */
// Reallocate a buffer of probe_size + padding bytes; the data read in previous iterations is preserved
if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
goto fail;
// 1.1.2.1、Call avio_read to read probe_size - buf_offset bytes, storing them starting at buf_offset
if ((ret = avio_read(pb, buf + buf_offset,
probe_size - buf_offset)) < 0) {
/* Fail if error was not end of file, otherwise, lower score. */
if (ret != AVERROR_EOF)
goto fail;
score = 0;
ret = 0; /* error was end of file, nothing read */
}
// Track how much data is now in buf
buf_offset += ret;
// Not enough data yet to reach offset
if (buf_offset < offset)
continue;
// Wrap the data that was read in the AVProbeData
pd.buf_size = buf_offset - offset;
pd.buf = &buf[offset];
// Zero the extra padding bytes
memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
/* Guess file format. */
// 1.1.2.2、Call this to pick the most suitable AVInputFormat
*fmt = av_probe_input_format2(&pd, 1, &score);
if (*fmt) {
/* This can only be true in the last iteration. */
if (score <= AVPROBE_SCORE_RETRY) {
av_log(logctx, AV_LOG_WARNING,
"Format %s detected only with low score of %d, "
"misdetection possible!\n", (*fmt)->name, score);
} else
av_log(logctx, AV_LOG_DEBUG,
"Format %s probed with size=%d and score=%d\n",
(*fmt)->name, probe_size, score);
#if 0
FILE *f = fopen("probestat.tmp", "ab");
fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
fclose(f);
#endif
}
}
if (!*fmt)
ret = AVERROR_INVALIDDATA;
fail:
/* Rewind. Reuse probe buffer to avoid seeking. */
// If a suitable demuxer was found, rewind the AVIOContext's internal buffer while keeping the data read during probing
ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset);
if (ret >= 0)
ret = ret2;
av_freep(&pd.mime_type);
return ret < 0 ? ret : score;
}

1.1.2.1、avio_read

avio_read is implemented at libavformat/aviobuf.c:641

This function is another layer on top of the AVIOContext's read_packet: it reads data into the AVIOContext and then copies it from there into the caller's buf. If the requested size exceeds the AVIOContext's maximum buffer size, the data is read directly into the caller's buffer instead.
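
From the user's point of view this is the same avio_read exposed by the public API; a short sketch that dumps the first bytes of an input (dump_header_bytes is my own helper name):

#include <stdio.h>
#include <libavformat/avio.h>

static int dump_header_bytes(const char *url)
{
    AVIOContext *pb = NULL;
    uint8_t hdr[16];
    int ret = avio_open(&pb, url, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;

    /* Internally this goes read_packet -> ffurl_read -> URLProtocol.url_read. */
    ret = avio_read(pb, hdr, sizeof(hdr));
    for (int i = 0; i < ret; i++)
        printf("%02x ", hdr[i]);
    printf("\n");

    avio_closep(&pb);
    return ret < 0 ? ret : 0;
}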

int avio_read(AVIOContext *s, unsigned char *buf, int size)
{
int len, size1;
size1 = size;
while (size > 0) {
// How much data is left in the AVIOContext's buffer
len = FFMIN(s->buf_end - s->buf_ptr, size);
// If the AVIOContext buffer has nothing left, or the requested size is 0 (in practice size == 0 does not occur here,
// because the loop would already have exited); in other words, once the internal buffer is drained, large reads go straight into the caller's buffer
if (len == 0 || s->write_flag) {
// If the AVIOContext's direct flag is 1, or the requested size exceeds the AVIOContext's maximum buffer size
if((s->direct || size > s->buffer_size) && !s->update_checksum && s->read_packet) {
// bypass the buffer and read data directly into buf
// Call the AVIOContext's read_packet to read the data directly into buf
len = read_packet_wrapper(s, buf, size);
if (len == AVERROR_EOF) {
/* do not modify buffer if EOF reached so that a seek back can
be done without rereading data */
s->eof_reached = 1;
break;
} else if (len < 0) {
s->eof_reached = 1;
s->error= len;
break;
} else {
s->pos += len;
ffiocontext(s)->bytes_read += len;
s->bytes_read = ffiocontext(s)->bytes_read;
size -= len;
buf += len;
// reset the buffer
s->buf_ptr = s->buffer;
s->buf_end = s->buffer/* + len*/;
}
} else {
fill_buffer(s);
len = s->buf_end - s->buf_ptr;
if (len == 0)
break;
}
} else {
// Copy data out of the AVIOContext's buffer and advance the read pointer
memcpy(buf, s->buf_ptr, len);
buf += len;
s->buf_ptr += len;
size -= len;
}
}
if (size1 == size) {
if (s->error) return s->error;
if (avio_feof(s)) return AVERROR_EOF;
}
return size1 - size;
}

libavformat/aviobuf.c:527

read_packet_wrapper simply calls the AVIOContext's read_packet callback.

static int read_packet_wrapper(AVIOContext *s, uint8_t *buf, int size)
{
int ret;
if (!s->read_packet)
return AVERROR(EINVAL);
ret = s->read_packet(s->opaque, buf, size);
av_assert2(ret || s->max_packet_size);
return ret;
}

1.1.2.2、av_probe_input_format2

av_probe_input_format2 is implemented at libavformat/format.c:128

It essentially just calls av_probe_input_format3.

const AVInputFormat *av_probe_input_format2(const AVProbeData *pd,
int is_opened, int *score_max)
{
int score_ret;
const AVInputFormat *fmt = av_probe_input_format3(pd, is_opened, &score_ret);
if (score_ret > *score_max) {
// If the score is higher than the caller's score_max (25 here), return this demuxer
*score_max = score_ret;
return fmt;
} else
return NULL;
}

av_probe_input_format3 is implemented at libavformat/format.c:207

const AVInputFormat *av_probe_input_format3(const AVProbeData *pd,
int is_opened, int *score_ret)
{
AVProbeData lpd = *pd;
const AVInputFormat *fmt1 = NULL;
const AVInputFormat *fmt = NULL;
int score, score_max = 0;
void *i = 0;
const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
enum nodat {
NO_ID3,
ID3_ALMOST_GREATER_PROBE,
ID3_GREATER_PROBE,
ID3_GREATER_MAX_PROBE,
} nodat = NO_ID3;
if (!lpd.buf)
lpd.buf = (unsigned char *) zerobuffer;

// Check whether ID3 information is present by looking at the first 10 bytes
if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
int id3len = ff_id3v2_tag_len(lpd.buf);
if (lpd.buf_size > id3len + 16) {
if (lpd.buf_size < 2LL*id3len + 16)
nodat = ID3_ALMOST_GREATER_PROBE;
lpd.buf += id3len;
lpd.buf_size -= id3len;
} else if (id3len >= PROBE_BUF_MAX) {
nodat = ID3_GREATER_MAX_PROBE;
} else
nodat = ID3_GREATER_PROBE;
}

// Use av_demuxer_iterate to walk through all demuxers
while ((fmt1 = av_demuxer_iterate(&i))) {
if (fmt1->flags & AVFMT_EXPERIMENTAL)
continue;
if (!is_opened == !(fmt1->flags & AVFMT_NOFILE) && strcmp(fmt1->name, "image2"))
continue;
score = 0;
if (fmt1->read_probe) {
// Call the AVInputFormat's read_probe to probe and get a score
score = fmt1->read_probe(&lpd);
if (score)
av_log(NULL, AV_LOG_TRACE, "Probing %s score:%d size:%d\n", fmt1->name, score, lpd.buf_size);
// Combine the probe score with the extension-based score
if (fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) {
switch (nodat) {
case NO_ID3:
score = FFMAX(score, 1); // no ID3: keep the probe score
break;
case ID3_GREATER_PROBE:
case ID3_ALMOST_GREATER_PROBE:
score = FFMAX(score, AVPROBE_SCORE_EXTENSION / 2 - 1);
break;
case ID3_GREATER_MAX_PROBE:
score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
break;
}
}
} else if (fmt1->extensions) { // No read_probe but extensions exist: judge by the file extension
if (av_match_ext(lpd.filename, fmt1->extensions))
score = AVPROBE_SCORE_EXTENSION;
}
// Also take the MIME type into account
if (av_match_name(lpd.mime_type, fmt1->mime_type)) {
if (AVPROBE_SCORE_MIME > score) {
av_log(NULL, AV_LOG_DEBUG, "Probing %s score:%d increased to %d due to MIME type\n", fmt1->name, score, AVPROBE_SCORE_MIME);
score = AVPROBE_SCORE_MIME;
}
}
// Keep track of the highest score and the corresponding AVInputFormat
if (score > score_max) {
score_max = score;
fmt = fmt1;
} else if (score == score_max)
fmt = NULL;
}
if (nodat == ID3_GREATER_PROBE)
score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
*score_ret = score_max;
return fmt;
}

av_demuxer_iterate is implemented at libavformat/allformats.c:564; it is very similar to the earlier iteration over the URLProtocol list.

const AVInputFormat *av_demuxer_iterate(void **opaque)
{
static const uintptr_t size = sizeof(demuxer_list)/sizeof(demuxer_list[0]) - 1;
uintptr_t i = (uintptr_t)*opaque;
const AVInputFormat *f = NULL;
uintptr_t tmp;
if (i < size) {
// Fetch the entry at index i from demuxer_list
f = demuxer_list[i];
} else if (tmp = atomic_load_explicit(&indev_list_intptr, memory_order_relaxed)) {
const AVInputFormat *const *indev_list = (const AVInputFormat *const *)tmp;
f = indev_list[i - size];
}

// Advance the index by one
if (f)
*opaque = (void*)(i + 1);
return f;
}
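
av_demuxer_iterate is itself public API, so the registered demuxers can be listed from user code the same way the probing loop walks them:

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    void *opaque = NULL;
    const AVInputFormat *fmt;
    while ((fmt = av_demuxer_iterate(&opaque)))
        printf("%-16s %s\n", fmt->name, fmt->long_name ? fmt->long_name : "");
    return 0;
}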

demuxer_list is declared in libavformat/demuxer_list.c. It is far too long to paste here; it is an array of pointers that holds the address of every demuxer (AVInputFormat). Below, ff_mpegts_demuxer is used as an example.

ff_mpegts_demuxer is declared at libavformat/mpegts.c:3422

const AVInputFormat ff_mpegts_demuxer = {
.name = "mpegts",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-TS (MPEG-2 Transport Stream)"),
.priv_data_size = sizeof(MpegTSContext),
.read_probe = mpegts_probe,
.read_header = mpegts_read_header,
.read_packet = mpegts_read_packet,
.read_close = mpegts_read_close,
.read_timestamp = mpegts_get_dts,
.flags = AVFMT_SHOW_IDS | AVFMT_TS_DISCONT,
.priv_class = &mpegts_class,
};

The read_probe method is what does the probing. A much-simplified sketch of such a probe function is shown below.
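
This is a hypothetical, stripped-down probe in the spirit of mpegts_probe (the real one lives in libavformat/mpegts.c and is considerably more careful): check that 0x47 sync bytes appear every 188 bytes and score accordingly.

#include <libavformat/avformat.h>

static int toy_ts_probe(const AVProbeData *p)
{
    int packets = 0;

    for (int i = 0; i + 188 <= p->buf_size; i += 188) {
        if (p->buf[i] != 0x47)          /* MPEG-TS sync byte */
            return 0;
        packets++;
    }
    if (packets >= 5)
        return AVPROBE_SCORE_MAX;       /* looks very much like a transport stream */
    return packets ? AVPROBE_SCORE_MAX / 4 : 0;   /* AVPROBE_SCORE_RETRY in the source tree */
}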

1.1.2.3、ffio_rewind_with_probe_data

ffio_rewind_with_probe_data reuses the buffer that was already read while probing for the demuxer. My understanding is as follows: during probing more data may be read than the AVIOContext's maximum buffer size, i.e. bufp may hold more data than that maximum, so the AVIOContext's own buffer is replaced with the (possibly larger) probe buffer and its bookkeeping pointers are updated accordingly.

int ffio_rewind_with_probe_data(AVIOContext *s, unsigned char **bufp, int buf_size)
{
int64_t buffer_start;
int buffer_size;
int overlap, new_size, alloc_size;
uint8_t *buf = *bufp;
if (s->write_flag) {
av_freep(bufp);
return AVERROR(EINVAL);
}

// Length of the data currently in the AVIOContext's buffer
buffer_size = s->buf_end - s->buffer;
/* the buffers must touch or overlap */
// buffer_start is the amount of data by which bufp exceeds what the AVIOContext holds
// If that excess is larger than the total length something is wrong, so free the buffer
if ((buffer_start = s->pos - buffer_size) > buf_size) {
av_freep(bufp);
return AVERROR(EINVAL);
}
// Compute the overlapping length
overlap = buf_size - buffer_start;
// Compute the new length, buffer_size + buffer_start; as far as I can tell this ends up equal to buf_size
new_size = buf_size + buffer_size - overlap;

// If new_size (buf_size) is larger than the original AVIOContext buffer size, alloc_size takes the larger value
alloc_size = FFMAX(s->buffer_size, new_size);
// I think these two values end up equal, so no reallocation is actually needed
if (alloc_size > buf_size)
if (!(buf = (*bufp) = av_realloc_f(buf, 1, alloc_size)))
return AVERROR(ENOMEM);
// Likewise, no data should need to be copied here
if (new_size > buf_size) {
memcpy(buf + buf_size, s->buffer + overlap, buffer_size - overlap);
buf_size = new_size;
}

// Free the AVIOContext's original buffer
av_free(s->buffer);
// Point the buffer pointers at the new, possibly larger buffer
s->buf_ptr = s->buffer = buf;
// Update buffer_size
s->buffer_size = alloc_size;
// Update the read/write position
s->pos = buf_size;
// Update the end-of-data position
s->buf_end = s->buf_ptr + buf_size;
s->eof_reached = 0;
return 0;
}

1.2、read_header

read_header is actually a method of the AVInputFormat. Once all the preparation is done, it is called to read the metadata and obtain the streams and related information. This works best for containers that have a header, such as FLV and MOV; for header-less formats such as MPEG-2 TS it may not be able to extract the stream info at this point.
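
At the user level, the result of this stage is the populated streams array; a small sketch of what a caller typically does with it (list_streams is my own helper name; for header-less formats avformat_find_stream_info is usually needed as well):

#include <libavformat/avformat.h>

static void list_streams(const AVFormatContext *fmt_ctx)
{
    for (unsigned i = 0; i < fmt_ctx->nb_streams; i++) {
        const AVStream *st = fmt_ctx->streams[i];
        const char *type = av_get_media_type_string(st->codecpar->codec_type);
        av_log(NULL, AV_LOG_INFO, "stream %u: type=%s codec_id=%d time_base=%d/%d\n",
               i, type ? type : "unknown", st->codecpar->codec_id,
               st->time_base.num, st->time_base.den);
    }
}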

Below, ff_mov_demuxer is used as an example; the implementation is in libavformat/mov.c:8566

The code is long, so only the key points are shown here; the main work is calling mov_read_default to read the boxes (atoms).

static int mov_read_header(AVFormatContext *s)
{
MOVContext *mov = s->priv_data;
AVIOContext *pb = s->pb;
int j, err;
MOVAtom atom = { AV_RL32("root") };
int i;
if (mov->decryption_key_len != 0 && mov->decryption_key_len != AES_CTR_KEY_SIZE) {
av_log(s, AV_LOG_ERROR, "Invalid decryption key len %d expected %d\n",
mov->decryption_key_len, AES_CTR_KEY_SIZE);
return AVERROR(EINVAL);
}
mov->fc = s;
mov->trak_index = -1;
/* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
if (pb->seekable & AVIO_SEEKABLE_NORMAL)
atom.size = avio_size(pb);
else
atom.size = INT64_MAX;
/* check MOV header */
do {
if (mov->moov_retry)
avio_seek(pb, 0, SEEK_SET);
if ((err = mov_read_default(mov, pb, atom)) < 0) {
av_log(s, AV_LOG_ERROR, "error reading header\n");
return err;
}
} while ((pb->seekable & AVIO_SEEKABLE_NORMAL) && !mov->found_moov && !mov->moov_retry++);
if (!mov->found_moov) {
av_log(s, AV_LOG_ERROR, "moov atom not found\n");
return AVERROR_INVALIDDATA;
}
av_log(mov->fc, AV_LOG_TRACE, "on_parse_exit_offset=%"PRId64"\n", avio_tell(pb)); // ......
}

mov_read_default mainly parses out the box (atom) type and then finds the matching handler to parse that box; a trak box, for example, is matched by its type in mov_default_parse_table and handled by mov_read_trak.

static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
int64_t total_size = 0;
MOVAtom a;
int i;
if (c->atom_depth > 10) {
av_log(c->fc, AV_LOG_ERROR, "Atoms too deeply nested\n");
return AVERROR_INVALIDDATA;
}
c->atom_depth ++;
if (atom.size < 0)
atom.size = INT64_MAX;
while (total_size <= atom.size - 8 && !avio_feof(pb)) {
int (*parse)(MOVContext*, AVIOContext*, MOVAtom) = NULL;
a.size = atom.size;
a.type=0;
if (atom.size >= 8) {
a.size = avio_rb32(pb);
a.type = avio_rl32(pb);
if (((a.type == MKTAG('f','r','e','e') && c->moov_retry) ||
a.type == MKTAG('h','o','o','v')) &&
a.size >= 8 &&
c->fc->strict_std_compliance < FF_COMPLIANCE_STRICT) {
uint32_t type;
avio_skip(pb, 4);
type = avio_rl32(pb);
if (avio_feof(pb))
break;
avio_seek(pb, -8, SEEK_CUR);
if (type == MKTAG('m','v','h','d') ||
type == MKTAG('c','m','o','v')) {
av_log(c->fc, AV_LOG_ERROR, "Detected moov in a free or hoov atom.\n");
a.type = MKTAG('m','o','o','v');
}
}
if (atom.type != MKTAG('r','o','o','t') &&
atom.type != MKTAG('m','o','o','v')) {
if (a.type == MKTAG('t','r','a','k') ||
a.type == MKTAG('m','d','a','t')) {
av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
avio_skip(pb, -8);
c->atom_depth --;
return 0;
}
}
total_size += 8;
if (a.size == 1 && total_size + 8 <= atom.size) { /* 64 bit extended size */
a.size = avio_rb64(pb) - 8;
total_size += 8;
}
}
av_log(c->fc, AV_LOG_TRACE, "type:'%s' parent:'%s' sz: %"PRId64" %"PRId64" %"PRId64"\n",
av_fourcc2str(a.type), av_fourcc2str(atom.type), a.size, total_size, atom.size);
if (a.size == 0) {
a.size = atom.size - total_size + 8;
}
if (a.size < 0)
break;
a.size -= 8;
if (a.size < 0)
break;
a.size = FFMIN(a.size, atom.size - total_size);
// Look up the handler that corresponds to this box type
for (i = 0; mov_default_parse_table[i].type; i++)
if (mov_default_parse_table[i].type == a.type) {
parse = mov_default_parse_table[i].parse;
break;
}
// ......
}

mov_read_trak

This function reads the track information from the box, creates an AVStream, and stores that information in the AVStream.

1.2.1、avformat_new_stream is called to create an AVStream, with the AVFormatContext as its argument

static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
MOVStreamContext *sc;
int ret;
// 1.2.1、Create an AVStream
st = avformat_new_stream(c->fc, NULL);
if (!st) return AVERROR(ENOMEM);
// The default id is -1
st->id = -1;
// Allocate a MOVStreamContext
sc = av_mallocz(sizeof(MOVStreamContext));
if (!sc) return AVERROR(ENOMEM);
// The AVStream's priv_data holds the stream info in each input format's own layout
st->priv_data = sc;
// Default codecpar->codec_type to data
st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
sc->ffindex = st->index;
c->trak_index = st->index;
// Keep reading the child boxes of the trak box; this is where the track metadata is read into the AVStream
if ((ret = mov_read_default(c, pb, atom)) < 0)
return ret;
c->trak_index = -1;
// Here stsc refers to a chunk not described in stco. This is technically invalid,
// but we can overlook it (clearing stsc) whenever stts_count == 0 (indicating no samples).
if (!sc->chunk_count && !sc->stts_count && sc->stsc_count) {
sc->stsc_count = 0;
av_freep(&sc->stsc_data);
}
/* sanity checks */
if ((sc->chunk_count && (!sc->stts_count || !sc->stsc_count ||
(!sc->sample_size && !sc->sample_count))) ||
(!sc->chunk_count && sc->sample_count)) {
av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n",
st->index);
return 0;
}
if (sc->stsc_count && sc->stsc_data[ sc->stsc_count - 1 ].first > sc->chunk_count) {
av_log(c->fc, AV_LOG_ERROR, "stream %d, contradictionary STSC and STCO\n",
st->index);
return AVERROR_INVALIDDATA;
}
fix_timescale(c, sc);
avpriv_set_pts_info(st, 64, 1, sc->time_scale);
mov_build_index(c, st);
if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
MOVDref *dref = &sc->drefs[sc->dref_id - 1];
if (c->enable_drefs) {
if (mov_open_dref(c, &sc->pb, c->fc->url, dref) < 0)
av_log(c->fc, AV_LOG_ERROR,
"stream %d, error opening alias: path='%s', dir='%s', "
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
st->index, dref->path, dref->dir, dref->filename,
dref->volume, dref->nlvl_from, dref->nlvl_to);
} else {
av_log(c->fc, AV_LOG_WARNING,
"Skipped opening external track: "
"stream %d, alias: path='%s', dir='%s', "
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d."
"Set enable_drefs to allow this.\n",
st->index, dref->path, dref->dir, dref->filename,
dref->volume, dref->nlvl_from, dref->nlvl_to);
}
} else {
sc->pb = c->fc->pb;
sc->pb_is_copied = 1;
}
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (!st->sample_aspect_ratio.num && st->codecpar->width && st->codecpar->height &&
sc->height && sc->width &&
(st->codecpar->width != sc->width || st->codecpar->height != sc->height)) {
st->sample_aspect_ratio = av_d2q(((double)st->codecpar->height * sc->width) /
((double)st->codecpar->width * sc->height), INT_MAX);
}
#if FF_API_R_FRAME_RATE
if (sc->stts_count == 1 || (sc->stts_count == 2 && sc->stts_data[1].count == 1))
av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den,
sc->time_scale, sc->stts_data[0].duration, INT_MAX);
#endif
} // done for ai5q, ai52, ai55, ai1q, ai12 and ai15.
if (!st->codecpar->extradata_size && st->codecpar->codec_id == AV_CODEC_ID_H264 &&
TAG_IS_AVCI(st->codecpar->codec_tag)) {
ret = ff_generate_avci_extradata(st);
if (ret < 0)
return ret;
}
switch (st->codecpar->codec_id) {
#if CONFIG_H261_DECODER
case AV_CODEC_ID_H261:
#endif
#if CONFIG_H263_DECODER
case AV_CODEC_ID_H263:
#endif
#if CONFIG_MPEG4_DECODER
case AV_CODEC_ID_MPEG4:
#endif
st->codecpar->width = 0; /* let decoder init width/height */
st->codecpar->height= 0;
break;
} // If the duration of the mp3 packets is not constant, then they could need a parser
if (st->codecpar->codec_id == AV_CODEC_ID_MP3
&& sc->stts_count > 3
&& sc->stts_count*10 > st->nb_frames
&& sc->time_scale == st->codecpar->sample_rate) {
ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
}
/* Do not need those anymore. */
av_freep(&sc->chunk_offsets);
av_freep(&sc->sample_sizes);
av_freep(&sc->keyframes);
av_freep(&sc->stts_data);
av_freep(&sc->stps_data);
av_freep(&sc->elst_data);
av_freep(&sc->rap_group);
return 0;
}

1.2.1、avformat_new_stream

Now for the key stream-creation function, avformat_new_stream.

It is implemented at libavformat/utils.c:768 and consists of the following steps:

1.2.1.1、allocate space for the AVStream

1.2.1.2、create a context for the AVStream, the FFStream

1.2.1.3、call avcodec_parameters_alloc to create the AVStream's codecpar

1.2.1.4、call avcodec_alloc_context3 to create an AVCodecContext for the FFStream; the FFStream acts as the AVStream's context, managing the stream-related information as well as the stream's codec

1.2.1.5、allocate and initialize the FFStream's info member

The most important step here is 1.2.1.4.

AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
{
FFFormatContext *const si = ffformatcontext(s);
FFStream *sti;
AVStream *st;
AVStream **streams;
// Check the number of streams
if (s->nb_streams >= s->max_streams) {
av_log(s, AV_LOG_ERROR, "Number of streams exceeds max_streams parameter"
" (%d), see the documentation if you wish to increase it\n",
s->max_streams);
return NULL;
}
// 1.2.1.1、Grow the streams array in the AVFormatContext
streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
if (!streams)
return NULL;
s->streams = streams;
// 1.2.1.2、Create the AVStream's context, an FFStream
sti = av_mallocz(sizeof(*sti));
if (!sti)
return NULL;
// Associate the FFStream with the AVStream
st = &sti->pub;
#if FF_API_AVSTREAM_CLASS
st->av_class = &stream_class;
#endif
// 1.2.1.3、Call avcodec_parameters_alloc to initialize codecpar
st->codecpar = avcodec_parameters_alloc();
if (!st->codecpar)
goto fail;
// 1.2.1.4、Call avcodec_alloc_context3 to create an AVCodecContext object
sti->avctx = avcodec_alloc_context3(NULL);
if (!sti->avctx)
goto fail;
if (s->iformat) {
// 1.2.1.5、Allocate the info struct, which stores codec-related information
sti->info = av_mallocz(sizeof(*sti->info));
if (!sti->info)
goto fail;
#if FF_API_R_FRAME_RATE
sti->info->last_dts = AV_NOPTS_VALUE;
#endif
sti->info->fps_first_dts = AV_NOPTS_VALUE;
sti->info->fps_last_dts = AV_NOPTS_VALUE;
/* default pts setting is MPEG-like */
avpriv_set_pts_info(st, 33, 1, 90000);
/* we set the current DTS to 0 so that formats without any timestamps
* but durations get some timestamps, formats with some unknown
* timestamps have their first few packets buffered and the
* timestamps corrected before they are returned to the user */
sti->cur_dts = RELATIVE_TS_BASE;
} else {
sti->cur_dts = AV_NOPTS_VALUE;
}
// The AVStream's index is its position (nb_streams) in the AVFormatContext
st->index = s->nb_streams;
// Initialize the relevant fields of the AVStream and FFStream
st->start_time = AV_NOPTS_VALUE;
st->duration = AV_NOPTS_VALUE;
sti->first_dts = AV_NOPTS_VALUE;
sti->probe_packets = s->max_probe_packets;
sti->pts_wrap_reference = AV_NOPTS_VALUE;
sti->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
sti->last_IP_pts = AV_NOPTS_VALUE;
sti->last_dts_for_order_check = AV_NOPTS_VALUE;
// Initialize the pts_buffer
for (int i = 0; i < MAX_REORDER_DELAY + 1; i++)
sti->pts_buffer[i] = AV_NOPTS_VALUE;
st->sample_aspect_ratio = (AVRational) { 0, 1 };
sti->inject_global_side_data = si->inject_global_side_data;
sti->need_context_update = 1;
// Store the newly created AVStream in the AVFormatContext
s->streams[s->nb_streams++] = st;
return st;
fail:
free_stream(&st);
return NULL;
}

1.2.1.4、avcodec_alloc_context3

This function creates an AVCodecContext object; it is implemented at libavcodec/options.c:141

AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
{
AVCodecContext *avctx = av_malloc(sizeof(AVCodecContext));
if (!avctx)
return NULL;
if (init_context_defaults(avctx, codec) < 0) {
av_free(avctx);
return NULL;
}
return avctx;
}
