Playing Audio with SDL
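This post is a minimal audio player built on the FFmpeg API of that era (avcodec_decode_audio4, avcodec_alloc_frame, av_free_packet) and SDL 1.x. A demux thread reads packets from the input file and pushes the audio packets into a mutex-protected queue; SDL's audio callback pops packets, decodes them, resamples the result to signed 16-bit with libswresample when necessary, and copies the samples into the buffer SDL hands it. The complete listing follows.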
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/avstring.h>
#include <libavutil/pixfmt.h>
#include <libavutil/log.h>
}; #include <stdio.h>
#include <math.h>
#include <SDL.h>
#include <SDL_thread.h> #pragma comment(lib, "libmingwex.lib")
#pragma comment(lib, "libgcc.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swresample.lib") #pragma comment(lib, "SDL.lib")
#pragma comment(lib, "SDLmain.lib") #define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIOQ_SIZE (1 * 1024 * 1024)
#define FF_ALLOC_EVENT (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT (SDL_USEREVENT + 2) typedef struct PacketQueue {//Queue
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

typedef struct VideoState { // State
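    // Input side: file name, format context, and the codec context/decoder of
    // the chosen audio stream. Decode side: the packet currently being drained
    // (audio_pkt / audio_pkt_data / audio_pkt_size), the decoded frame, and
    // the byte buffers handed to the SDL callback. Resample side: source and
    // target sample format, channel layout/count and sample rate, plus the
    // SwrContext that converts between them.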
    char filename[1024];
    AVFormatContext *ic;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    int videoStream, audioStream;
    AVStream *audio_st;
    AVFrame *audio_frame;
    PacketQueue audioq;
    unsigned int audio_buf_size;
    unsigned int audio_buf_index;
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    DECLARE_ALIGNED(16, uint8_t, audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
    enum AVSampleFormat audio_src_fmt;
    enum AVSampleFormat audio_tgt_fmt;
    int audio_src_channels;
    int audio_tgt_channels;
    int64_t audio_src_channel_layout;
    int64_t audio_tgt_channel_layout;
    int audio_src_freq;
    int audio_tgt_freq;
    struct SwrContext *swr_ctx;
    SDL_Thread *parse_tid; // thread id
    int quit;              // flag
} VideoState;

VideoState *global_video_state; // global state

void packet_queue_init(PacketQueue *q) { // init queue
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) { // put pkt to pktQueue
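    // The queue stores AVPacketList nodes rather than bare packets: every
    // packet is wrapped in a freshly allocated node, appended under the mutex,
    // and the condition variable is signalled to wake a blocked reader.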
    AVPacketList *pkt1;

    pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
    if (!pkt1) {
        return -1;
    }
    pkt1->pkt = *pkt;   // copy the packet into the list node
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);
    if (!q->last_pkt) { // queue is empty: the new node is also the head
        q->first_pkt = pkt1;
    } else {            // otherwise chain it after the current tail
        q->last_pkt->next = pkt1;
    }
    q->last_pkt = pkt1;        // update the tail
    q->nb_packets++;           // packet count
    q->size += pkt1->pkt.size; // total bytes queued
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
    return 0;
}

// Pop a packet from the queue; if block is nonzero, wait until one is available
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
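    // Returns 1 when a packet was dequeued, 0 when the queue is empty and
    // block is 0, and -1 when the global quit flag has been set.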
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);
    for (;;) {
        if (global_video_state->quit) { // check the quit flag first
            ret = -1;
            break;                      // quitting: leave the loop
        }

        pkt1 = q->first_pkt;            // FIFO: take from the head
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt) {        // queue is now empty
                q->last_pkt = NULL;
            }
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex); // wait until a packet arrives
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

// Free every packet still in the queue and reset it to empty
static void packet_queue_flush(PacketQueue *q) {
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

// Core decoding routine: decode one audio frame into is->audio_buf and return
// its size in bytes; all intermediate state is kept in the VideoState
int audio_decode_frame(VideoState *is) {
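    // Drains the packet currently held in is->audio_pkt; when it is exhausted,
    // blocks on the packet queue for the next one. Decoded samples are
    // resampled to the SDL output format whenever the source format, channel
    // layout or sample rate differs from the negotiated target.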
    int len1, len2, decoded_data_size;
    AVPacket *pkt = &is->audio_pkt;
    int got_frame = 0;
    int64_t dec_channel_layout;
    int wanted_nb_samples, resampled_data_size;

    for (;;) { // loop until a frame is produced or an error/quit occurs
        while (is->audio_pkt_size > 0) {
            if (!is->audio_frame) {
                if (!(is->audio_frame = avcodec_alloc_frame())) {
                    return AVERROR(ENOMEM);
                }
            } else
                avcodec_get_frame_defaults(is->audio_frame); // reset the AVFrame to default values

            len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame, &got_frame, pkt);
            if (len1 < 0) {
                // error, skip the frame
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;

            if (!got_frame)
                continue;

            /* decoded_data_size = av_samples_get_buffer_size(NULL,
                    is->audio_frame->channels,
                    is->audio_frame->nb_samples,
                    is->audio_frame->format, 1); */
            decoded_data_size = av_samples_get_buffer_size(NULL, // size of the decoded data in bytes
                    is->pCodecCtx->channels,
                    is->audio_frame->nb_samples,
                    AVSampleFormat(is->audio_frame->format), 1);

            /* dec_channel_layout = (is->audio_frame->channel_layout && is->audio_frame->channels
                    == av_get_channel_layout_nb_channels(is->audio_frame->channel_layout))
                    ? is->audio_frame->channel_layout
                    : av_get_default_channel_layout(is->audio_frame->channels); */
            dec_channel_layout = (is->pCodecCtx->channel_layout && is->pCodecCtx->channels
                    == av_get_channel_layout_nb_channels(is->pCodecCtx->channel_layout))
                    ? is->pCodecCtx->channel_layout
                    : av_get_default_channel_layout(is->pCodecCtx->channels);

            wanted_nb_samples = is->audio_frame->nb_samples;
            //fprintf(stderr, "wanted_nb_samples = %d\n", wanted_nb_samples);
            // If the decoded audio no longer matches the source format the
            // resampler was set up for, (re)create the resampler
            if (is->audio_frame->format != is->audio_src_fmt ||
                    dec_channel_layout != is->audio_src_channel_layout ||
                    is->pCodecCtx->sample_rate != is->audio_src_freq ||
                    (wanted_nb_samples != is->audio_frame->nb_samples && !is->swr_ctx)) {
                if (is->swr_ctx)
                    swr_free(&is->swr_ctx);
                is->swr_ctx = swr_alloc_set_opts(NULL, // allocate and configure the SwrContext
                        is->audio_tgt_channel_layout,
                        is->audio_tgt_fmt,
                        is->audio_tgt_freq,
                        dec_channel_layout,
                        AVSampleFormat(is->audio_frame->format),
                        is->pCodecCtx->sample_rate,
                        0, NULL);
                if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
                    fprintf(stderr, "swr_init() failed\n");
                    break;
                }
                // remember the source parameters the resampler was built for
                is->audio_src_channel_layout = dec_channel_layout;
                is->audio_src_channels = is->audio_st->codec->channels;
                is->audio_src_freq = is->audio_st->codec->sample_rate;
                is->audio_src_fmt = is->audio_st->codec->sample_fmt;
            }
            if (is->swr_ctx) {
                // const uint8_t *in[] = { is->audio_frame->data[0] };
                const uint8_t **in = (const uint8_t **)is->audio_frame->extended_data;
                uint8_t *out[] = { is->audio_buf2 };
                if (wanted_nb_samples != is->audio_frame->nb_samples) { // sample compensation
                    swr_compensate(is->swr_ctx, (wanted_nb_samples - is->audio_frame->nb_samples)
                            * is->audio_tgt_freq / is->pCodecCtx->sample_rate,
                            wanted_nb_samples * is->audio_tgt_freq / is->pCodecCtx->sample_rate);
                }
                // convert into audio_buf2; the second argument is the output
                // capacity in samples per channel
                len2 = swr_convert(is->swr_ctx, out,
                        sizeof(is->audio_buf2)
                            / is->audio_tgt_channels
                            / av_get_bytes_per_sample(is->audio_tgt_fmt),
                        in, is->audio_frame->nb_samples);
                if (len2 < 0) {
                    fprintf(stderr, "swr_convert() failed\n");
                    break;
                }
                if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
                    fprintf(stderr, "warning: audio buffer is probably too small\n");
                    swr_init(is->swr_ctx);
                }
                is->audio_buf = is->audio_buf2; // play from the resampled buffer
                resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
            } else {
                resampled_data_size = decoded_data_size; // no resampling needed
                is->audio_buf = is->audio_frame->data[0];
            }
            // We have data, return it and come back for more later
            return resampled_data_size; // size in bytes after resampling
        } // current packet fully decoded

        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt, 0, sizeof(*pkt));
        if (is->quit)
            return -1;
        // fetch the next packet (blocking)
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
    }
}

// SDL audio callback: fill `stream` with `len` bytes of decoded audio
void audio_callback(void *userdata, Uint8 *stream, int len) {
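    // SDL calls this from its own audio thread whenever the device needs more
    // data. Decoded bytes are cached in is->audio_buf; audio_buf_index tracks
    // how much of that cache has already been copied out on previous calls.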
    VideoState *is = (VideoState *)userdata;
    int len1, audio_data_size;

    printf("audio callback 1 len=%d\n", len);
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_data_size = audio_decode_frame(is); // decode one frame, returns its size
            if (audio_data_size < 0) {
                /* decoding failed: output silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_data_size;
            }
            is->audio_buf_index = 0;
            printf("audio callback 2 (audio_buf_size,audio_buf_index) = (%d,%d)\n", is->audio_buf_size, is->audio_buf_index);
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
        printf("audio callback 3 (len1,len,audio_buf_index) = (%d,%d,%d)\n", len1, len, is->audio_buf_index);
    }
}

// Open the stream at stream_index, set up the decoder and the SDL audio device
int stream_component_open(VideoState *is, int stream_index) {
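    // Negotiates an S16 output format with SDL: it requests the codec's sample
    // rate and channel layout, falls back to other channel counts if
    // SDL_OpenAudio rejects the request, and records the agreed parameters as
    // the resampling target in the VideoState.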
    AVFormatContext *ic = is->ic;
    AVCodecContext *codecCtx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;
    int64_t wanted_channel_layout = 0;
    int wanted_nb_channels;
    const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};

    if (stream_index < 0 || stream_index >= ic->nb_streams) {
        return -1;
    }

    codecCtx = ic->streams[stream_index]->codec;
    is->pCodecCtx = codecCtx;

    // desired output parameters, derived from the codec
    wanted_nb_channels = codecCtx->channels;
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.freq = codecCtx->sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        fprintf(stderr, "Invalid sample rate or channel count!\n");
        return -1;
    }
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback; // callback that feeds the device
    wanted_spec.userdata = is;

    // open the audio device, retrying with fewer channels if necessary
    while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels) {
            fprintf(stderr, "No more channel combinations to try, audio open failed\n");
            return -1;
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }

    if (spec.format != AUDIO_S16SYS) {
        fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
        return -1;
    }
    if (spec.channels != wanted_spec.channels) {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if (!wanted_channel_layout) {
            fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
            return -1;
        }
    }

    fprintf(stderr, "%d: wanted_spec.format = %d\n", __LINE__, wanted_spec.format);
    fprintf(stderr, "%d: wanted_spec.samples = %d\n", __LINE__, wanted_spec.samples);
    fprintf(stderr, "%d: wanted_spec.channels = %d\n", __LINE__, wanted_spec.channels);
    fprintf(stderr, "%d: wanted_spec.freq = %d\n", __LINE__, wanted_spec.freq);
    fprintf(stderr, "%d: spec.format = %d\n", __LINE__, spec.format);
    fprintf(stderr, "%d: spec.samples = %d\n", __LINE__, spec.samples);
    fprintf(stderr, "%d: spec.channels = %d\n", __LINE__, spec.channels);
    fprintf(stderr, "%d: spec.freq = %d\n", __LINE__, spec.freq);

    // the parameters SDL accepted become both source and target of the resampler
    is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
    is->audio_src_freq = is->audio_tgt_freq = spec.freq;
    is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
    is->audio_src_channels = is->audio_tgt_channels = spec.channels;

    // find and open the decoder
    codec = avcodec_find_decoder(codecCtx->codec_id);
    is->pCodec = codec;
    if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (codecCtx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0); // start the callback
        break;
    default:
        break;
    }
    return 0; // missing in the original listing; the function is declared to return int
}
/*
// Unfinished stub for closing a stream component
static void stream_component_close(VideoState *is, int stream_index) {
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;
}
*/
static int decode_thread(void *arg) {
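    // Runs on its own SDL thread: opens the input, picks the first audio
    // stream, then loops calling av_read_frame() and queueing audio packets
    // until EOF, an error, or the quit flag.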
    // Initialization: everything worked out in this function is stored back into `is`
    VideoState *is = (VideoState *)arg;
    AVFormatContext *ic = NULL;
    AVPacket pkt1, *packet = &pkt1;
    int ret, i, audio_index = -1;

    is->audioStream = -1;
    global_video_state = is; // publish the global state

    if (avformat_open_input(&ic, is->filename, NULL, NULL) != 0) {
        return -1;
    }
    is->ic = ic;
    if (avformat_find_stream_info(ic, NULL) < 0) { // read stream information
        return -1;
    }
    av_dump_format(ic, 0, is->filename, 0);

    for (i = 0; i < ic->nb_streams; i++) {
        if (ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) { // first audio stream
            audio_index = i;
            break;
        }
    }
    if (audio_index >= 0) {
        stream_component_open(is, audio_index); // open the audio stream
    }
    if (is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

    // main demux loop
    for (;;) {
        if (is->quit)
            break;
        if (is->audioq.size > MAX_AUDIOQ_SIZE) { // queue is full enough; slow down
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(is->ic, packet); // read one packet
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(is->ic->pb)) { // end of file
                break;
            }
            if (is->ic->pb && is->ic->pb->error) { // read error
                break;
            }
            continue;
        }
        if (packet->stream_index == is->audioStream) { // hand audio packets to the queue
            packet_queue_put(&is->audioq, packet);
        } else {
            av_free_packet(packet);
        }
    }

    // everything queued; wait for quit
    while (!is->quit) {
        SDL_Delay(100);
    }

fail:
    {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

int main(int argc, char *argv[])
{
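    // Entry point: allocate the VideoState, initialise SDL's audio subsystem,
    // start the demux thread, then sit in the SDL event loop until a quit
    // event arrives.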
    SDL_Event event;
    VideoState *is;

    is = (VideoState *)av_mallocz(sizeof(VideoState));

    //if (argc < 2) {
    //    fprintf(stderr, "Usage: test <file>\n");
    //    exit(1);
    //}
    argv[1] = "test.mp4"; // input file hard-coded instead of taken from the command line

    av_register_all(); // register all formats and codecs

    if (SDL_Init(SDL_INIT_AUDIO)) { // initialize SDL's audio subsystem
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], sizeof(is->filename));

    is->parse_tid = SDL_CreateThread(decode_thread, is); // start the demux thread
    if (!is->parse_tid) {
        av_free(is);
        return -1;
    }

    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case FF_QUIT_EVENT:
        case SDL_QUIT: // quit
            is->quit = 1;
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }
    return 0;
}
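The listing never releases anything it opens: the SDL audio device, the codec context, the format context and the resampler all leak when the process exits. A minimal cleanup sketch for the same FFmpeg/SDL 1.x vintage, reusing the VideoState fields above, might look like this (the helper name free_video_state is hypothetical and not part of the original post):

// Hypothetical teardown helper; not in the original program
static void free_video_state(VideoState *is)
{
    SDL_CloseAudio();                 // stop the audio callback before freeing buffers
    packet_queue_flush(&is->audioq);  // drop any packets still queued
    SDL_DestroyMutex(is->audioq.mutex);
    SDL_DestroyCond(is->audioq.cond);
    if (is->swr_ctx)
        swr_free(&is->swr_ctx);
    if (is->audio_frame)
        avcodec_free_frame(&is->audio_frame);
    if (is->pCodecCtx)
        avcodec_close(is->pCodecCtx);
    if (is->ic)
        avformat_close_input(&is->ic);
    av_free(is);
}

Calling it from the FF_QUIT_EVENT / SDL_QUIT branch, after setting is->quit and waiting for the demux thread, would be the natural place.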