This follows the earlier article on compiling FFmpeg under Cygwin.

The Android.mk added under the ffmpeg/arm folder exists mainly to declare the prebuilt shared libraries under lib/:

LOCAL_PATH:= $(call my-dir)

include $(CLEAR_VARS)
LOCAL_MODULE:= libavcodec
LOCAL_SRC_FILES:= lib/libavcodec-55.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE:= libavformat
LOCAL_SRC_FILES:= lib/libavformat-55.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE:= libswscale
LOCAL_SRC_FILES:= lib/libswscale-2.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE:= libavutil
LOCAL_SRC_FILES:= lib/libavutil-52.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE:= libavfilter
LOCAL_SRC_FILES:= lib/libavfilter-4.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
LOCAL_MODULE:= libswresample
LOCAL_SRC_FILES:= lib/libswresample-0.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
include $(PREBUILT_SHARED_LIBRARY)

Add Media.h:

#pragma once
#include <jni.h>
#include <android/native_window_jni.h>
#include "utils/Lock.h"
#include <pthread.h>
// FFmpeg needs __STDC_CONSTANT_MACROS defined first to compile under C++
#define __STDC_CONSTANT_MACROS
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
}

class Media
{
public:
Media();
~Media();
void setSurface(JNIEnv *pEnv, jobject pSurface,int pWidth,int pHeight);
bool initPath(const char * path);
bool initCodec(int width,int height);
int getResWidth();
int getResHeight();
void play();
void pause();
void stop();
bool isPlaying();
void decodeAndRenderPic(void *pBuffer,int dwBufsize);
void decodeAudioAndPlay(void *pBuffer,int dwBufsize);
private:
static void* decodeAndRenderAdpt(void *params);
void decodeAndRender();
private:
bool bInit;
ANativeWindow* window;
char *videoFileName;
AVFormatContext *formatCtx;
int videoStream;
int audioStream;
AVCodecContext *codecCtx;
AVCodecContext *codecCtxAudio;
AVFrame *decodedFrame;
AVFrame *frameRGBA ;
jobject bitmap;
void* buffer;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
int width;
int height;
bool _stop;
pthread_t decodeThread;
Mutex mutexSurface;
Mutex lockWindow;
};

The core code is as follows.

Add Media.cpp:

#include "Media.h"
#include "Audio.h" //ffmpeg 须要先定义 __STDC_CONSTANT_MACROS 才干通过 c++ 编译
#define __STDC_CONSTANT_MACROS #include <android/native_window_jni.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "utils/Log.h"
#include "cu.h"
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/linux-syscalls.h>
#define SYS_gettid __NR_gettid
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>
}

#define RGB_SIZE 4
//AV_PIX_FMT_RGBA,AV_PIX_FMT_RGB24
#define AV_FMT AV_PIX_FMT_RGBA

Media::Media():mutexSurface(true),window(NULL),lockWindow(false)
,frameRGBA(NULL),decodedFrame(NULL)
,codecCtx(NULL),formatCtx(NULL),_stop(true)
,buffer(NULL),height(0),width(0),videoStream(-1)
,sws_ctx(NULL),videoFileName(NULL),audioStream(-1)
,codecCtxAudio(NULL),swr_ctx(NULL),decodeThread(NULL)
{
bInit = false;
}
Media::~Media(){
stop();
if(NULL!=decodeThread)
{
pthread_join(decodeThread, NULL);
}
if(NULL!=window)
{
ANativeWindow_release(window);
window=NULL;
}
// Free the RGB image
if(NULL!=frameRGBA)
{
av_free(frameRGBA);
frameRGBA=NULL;
}
// Free the YUV frame
if(NULL!=decodedFrame)
{
av_free(decodedFrame);
decodedFrame=NULL;
}
// Close the codec
if(NULL!=codecCtx)
{
avcodec_close(codecCtx);
codecCtx=NULL;
}
// Close the video file
if(NULL!=formatCtx)
{
avformat_close_input(&formatCtx);
formatCtx=NULL;
}
}
void Media::setSurface(JNIEnv *pEnv, jobject pSurface,int pWidth,int pHeight)
{
LOGD("Media::setSurface start, %d,%d,%d", (int)pSurface , pWidth, pHeight);
if (0 != pSurface) {
if(pWidth <=0 || pHeight<=0)
{
LOGD("Media::setSurface width or height is zero !!! %d,%d", pWidth, pHeight);
return;
}
if(NULL==window)
{
synchronized(lockWindow)
{
// get the native window reference
window = ANativeWindow_fromSurface(pEnv, pSurface);
// set format and size of window buffer WINDOW_FORMAT_RGBA_8888
ANativeWindow_setBuffersGeometry(window, 0, 0, WINDOW_FORMAT_RGBA_8888);
}
}
} else {
stop();
if(NULL!=window)
{
// release the native window
synchronized(lockWindow)
{
ANativeWindow_release(window);
window=NULL;
}
}
return;
}
// reset width and height
width = pWidth;
height = pHeight;
if(NULL != buffer)
{
free(buffer);
buffer=NULL;
}
buffer = malloc(pWidth * pHeight * RGB_SIZE);
if(NULL == buffer)
{
LOGE("Media::setSurface Cannot malloc buffer size : %d!", pWidth * pHeight * RGB_SIZE);
return;
}
//get the scaling context
sws_ctx = sws_getContext (
codecCtx->width,
codecCtx->height,
codecCtx->pix_fmt,
pWidth,
pHeight,
AV_FMT,
SWS_FAST_BILINEAR,//SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Assign appropriate parts of bitmap to image planes in pFrameRGBA
// Note that pFrameRGBA is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)frameRGBA, (uint8_t *)buffer, AV_FMT,
pWidth, pHeight);
LOGD("Media::setSurface window:%d , mutexInit.isLocked: %d !", (int)window,(int) mutexSurface.isLocked());
if(NULL!=window && mutexSurface.isLocked())
{
LOGD("Media::setSurface unlock surface!");
mutexSurface.unlock();
}
LOGD("Media::setSurface OK!");
return;
} void audio_swr_resampling_audio_init(SwrContext **swr_ctx,/*TargetAudioParams *targetAudioParams,*/AVCodecContext *codec){ if(codec->sample_fmt == AV_SAMPLE_FMT_S16 /*|| codec->sample_fmt == AV_SAMPLE_FMT_S32 */||codec->sample_fmt == AV_SAMPLE_FMT_U8){
LOGE("codec->sample_fmt:%d",codec->sample_fmt);
if(*swr_ctx){
swr_free(swr_ctx);
*swr_ctx = NULL;
}
return;
}
if(*swr_ctx){
swr_free(swr_ctx);
}
*swr_ctx = swr_alloc();
if(!*swr_ctx){
LOGE("swr_alloc failed");
return;
}
/* set options */
av_opt_set_int(*swr_ctx, "in_channel_layout", codec->channel_layout, 0);
av_opt_set_int(*swr_ctx, "in_sample_rate", codec->sample_rate, 0);
av_opt_set_sample_fmt(*swr_ctx, "in_sample_fmt", codec->sample_fmt, 0);
av_opt_set_int(*swr_ctx, "out_channel_layout", codec->channel_layout/*targetAudioParams->channel_layout*/, 0);
av_opt_set_int(*swr_ctx, "out_sample_rate", codec->sample_rate/*targetAudioParams->sample_rate*/, 0);
av_opt_set_sample_fmt(*swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16/*targetAudioParams->sample_fmt*/, 0);// AV_SAMPLE_FMT_S16 /* initialize the resampling context */
int ret = 0;
if ((ret = swr_init(*swr_ctx)) < 0) {
LOGE("Failed to initialize the resampling context\n");
if(*swr_ctx){
swr_free(swr_ctx);
*swr_ctx = NULL;
}
return;
}
}

int audio_swr_resampling_audio(struct SwrContext *swr_ctx,/*TargetAudioParams *targetAudioParams,*/AVFrame *audioFrame,uint8_t **targetData){
int len = swr_convert(swr_ctx,targetData
,audioFrame->nb_samples
,(const uint8_t **)audioFrame->extended_data
,audioFrame->nb_samples);
if(len < 0){
LOGE("error swr_convert");
return -1;
}
int dst_bufsize = len * audioFrame->channels/*targetAudioParams->channels*/ * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16/*targetAudioParams->sample_fmt*/);
LOGI("dst_bufsize:%d",dst_bufsize);
return dst_bufsize;
}

void audio_swr_resampling_audio_destory(SwrContext **swr_ctx){
if(*swr_ctx){
swr_free(swr_ctx);
*swr_ctx = NULL;
}
}

bool Media::initPath(const char *path)
{
AVCodec *pCodec = NULL;
int i;
AVDictionary *optionsDict = NULL;
LOGI("video file name is %s", path);
// Register all formats and codecs
av_register_all();
// Open video file
if(avformat_open_input(&formatCtx, path, NULL, NULL)!=0)
return false; // Couldn't open file
LOGD("Media::initPath pos 1");
// Retrieve stream information
if(avformat_find_stream_info(formatCtx, NULL)<0)
return false; // Couldn't find stream information
LOGD("Media::initPath pos 2");
// Dump information about file onto standard error
av_dump_format(formatCtx, 0, path, 0);
LOGD("Media::initPath pos 3");
// Find the first video stream
videoStream=-1;
audioStream=-1;
for(i=0; i<formatCtx->nb_streams; i++) {
if(formatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
LOGI("FIND VIDEO CODEC ID : %d" , i);
videoStream=i;
if(audioStream!=-1 )
break;
}
else if(formatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
LOGI("FIND AUDIO CODEC ID: %d" , i);
audioStream=i;
if(videoStream!=-1 )
break;
}
}
LOGD("Media::initPath pos 5");
if(videoStream==-1)
return false; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
codecCtx=formatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(codecCtx->codec_id);
LOGD("Media::initPath pos 6");
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return false; // Codec not found
}
LOGD("Media::initPath pos 7");
// Open codec
if(avcodec_open2(codecCtx, pCodec, &optionsDict)<0)
return false; // Could not open codec
LOGD("Media::initPath pos 8");
// Allocate video frame
decodedFrame=av_frame_alloc();
LOGD("Media::initPath pos 9");
// Allocate an AVFrame structure
frameRGBA=av_frame_alloc();
if(frameRGBA==NULL)
return false;
bInit=true;
LOGD("Media::initPath pos 10"); //audio decodec
if(-1!=audioStream)
{
codecCtxAudio = formatCtx->streams[audioStream]->codec;
pCodec = avcodec_find_decoder(codecCtxAudio->codec_id);
if(avcodec_open2(codecCtxAudio, pCodec, &optionsDict)<0)
{
audioStream=-1;
LOGW("Error avcodec_open2 Audio Decode!");
} LOGE("codecCtxAudio data: %d, %d, %d"
, codecCtxAudio->bit_rate
, codecCtxAudio->sample_rate
, codecCtxAudio->channels);
}
return true;
}

bool Media::initCodec(int width, int height)
{
AVCodec *pCodec = NULL;
int i;
//avcodec_init();
// Register all formats and codecs
av_register_all();
/* find the H.264 video decoder */
pCodec = avcodec_find_decoder(CODEC_ID_H264);
if (!pCodec)
{
LOGE("codec not found!");
return false;
}
codecCtx = avcodec_alloc_context3(pCodec);
// Initialize parameters; the values below should be decided by the actual use case
codecCtx->time_base.num = 1;
codecCtx->frame_number = 1; // one video frame per packet
codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
codecCtx->bit_rate = 0;
codecCtx->time_base.den = 30; // frame rate
codecCtx->width = width;      // video width
codecCtx->height = height;    // video height
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
LOGE("codecCtx init OK! %d", (int)codecCtx);
// Open codec
if(avcodec_open2(codecCtx, pCodec, NULL)<0)
return false; // Could not open codec
// Allocate video frame
decodedFrame=av_frame_alloc();
// Allocate an AVFrame structure
frameRGBA=av_frame_alloc();
if(frameRGBA==NULL)
return false;
// Audio
int audioBitrate = 64000;
int sampleRate = 44100;// 44100, 22050 and 11025.
int channels=2;
pCodec = avcodec_find_decoder(CODEC_ID_AAC);
if (!pCodec)
{
LOGE("codec not found!");
return false;
}
codecCtxAudio = avcodec_alloc_context3(pCodec);
codecCtxAudio->codec_type = AVMEDIA_TYPE_AUDIO;
codecCtxAudio->codec_id = AV_CODEC_ID_AAC;
codecCtxAudio->sample_fmt = AV_SAMPLE_FMT_S16;
codecCtxAudio->sample_rate = sampleRate;
codecCtxAudio->channels = channels;
/*codecCtxAudio->profile = FF_PROFILE_AAC_MAIN;
codecCtxAudio->channel_layout = AV_CH_LAYOUT_STEREO;
codecCtxAudio->bit_rate = audioBitrate;
codecCtxAudio->time_base.num= 1;
codecCtxAudio->time_base.den= sampleRate;*/
codecCtxAudio->pix_fmt = PIX_FMT_NONE;
if(avcodec_open2(codecCtxAudio, pCodec, NULL)<0)
{
LOGE("codec not found!");
codecCtxAudio=NULL;
}
bInit=true;
return true;
}

int Media::getResWidth()
{
if(bInit && NULL != codecCtx)
return codecCtx->width;
return -1;
}

int Media::getResHeight()
{
if(bInit && NULL != codecCtx)
return codecCtx->height;
return -1;
}

void Media::play()
{
_stop = false;
pthread_create(&decodeThread, NULL, decodeAndRenderAdpt, this);
}

void Media::pause()
{
_stop=true;
}

void Media::stop()
{
_stop=true;
bInit = false;
}
bool Media::isPlaying()
{
return !_stop;
}
void* Media::decodeAndRenderAdpt(void *params)
{
LOGW("create thread : %d Media::decodeAndRenderAdpt", syscall(SYS_gettid));
bool bOk = AttachCurrentThread();
LOGI("AttachCurrentThread Result: %d", bOk);
Media *pMedia = (Media *)params;
if(NULL!=pMedia->codecCtxAudio)
{
audio_swr_resampling_audio_init(&pMedia->swr_ctx,pMedia->codecCtxAudio);
initAudio(pMedia->codecCtxAudio->sample_rate,pMedia->codecCtxAudio->channels==1,pMedia->codecCtxAudio->sample_fmt != AV_SAMPLE_FMT_U8);
LOGI("initAudio %d,%d,%d",pMedia->codecCtxAudio->sample_rate,pMedia->codecCtxAudio->channels,pMedia->codecCtxAudio->sample_fmt);
}
try{
pMedia->decodeAndRender();
}catch (...) {
LOGE("unkown Exception in Thread: Media::decodeAndRender");
}
if(bOk)
DetachCurrentThread();
if(NULL!=pMedia->codecCtxAudio)
{
releaseAudio();
audio_swr_resampling_audio_destory(&pMedia->swr_ctx);
}
pMedia->decodeThread=NULL;
return NULL;
}

void Media::decodeAndRender()
{
LOGD("Media::decodeAndRender check mutexInit.isLocked: %d !",(int)mutexSurface.isLocked());
if(mutexSurface.isLocked())
{
LOGD("Media::decodeAndRender wait unlock surface!");
mutexSurface.lock();
mutexSurface.unlock();
LOGD("Media::decodeAndRender wait unlock surface finished ok!");
}
ANativeWindow_Buffer windowBuffer;
AVPacket packet;
int i=0;
int frameFinished;
int lineCnt;
long pts;
long baseTime=0;
long waitTime = 0;
ANativeWindow * pWin;
pWin=window;
uint8_t **dst_data = NULL;
/* FILE *stream;
stream = fopen("/sdcard/1.pcm", "wb");*/
while(av_read_frame(formatCtx, &packet)>=0 && !_stop && NULL!=window && bInit) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished,
&packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGBA
sws_scale
(
sws_ctx,
(uint8_t const * const *)decodedFrame->data,
decodedFrame->linesize,
0,
codecCtx->height,
frameRGBA->data,
frameRGBA->linesize
);
if(packet.dts == AV_NOPTS_VALUE
&& decodedFrame->opaque && *(uint64_t*)decodedFrame->opaque != AV_NOPTS_VALUE)
{
pts = *(uint64_t *)decodedFrame->opaque;
LOGD("pst1: %d",pts);
}
else if(packet.dts != AV_NOPTS_VALUE) {
pts = packet.dts;
LOGD("pst2: %d",pts);
} else {
pts = 0;
LOGD("pst3: %d",pts);
}
//pts = av_q2d(codecCtx->time_base) * 1000000.0 * i * 2;
pts *= 1000;
//LOGD("debug %d,%d,%f",pts, (long)(av_q2d(codecCtx->time_base) * 1000000.0 * i * 2), av_q2d(codecCtx->time_base));
if(0 == pts || 0 == baseTime)
{
baseTime = av_gettime() - pts;
LOGD("BASETIME: %d",baseTime);
}else{
waitTime = (baseTime + pts) - av_gettime();
LOGD("WAITTIME: %d, %d",waitTime,pts);
}
//waitTime = (av_q2d(codecCtx->time_base) * 1000.0 - 0.0) * 1000;
if(waitTime>0)
usleep(waitTime);
if(!_stop)
{
synchronized(lockWindow)
{
if(!_stop && NULL!=window)
{
// lock the window buffer
if (ANativeWindow_lock(pWin, &windowBuffer, NULL) < 0) {
LOGE("cannot lock window");
} else {
// draw the frame on buffer
//LOGD("copy buffer %d:%d:%d", width, height, width*height*RGB_SIZE);
//LOGD("window buffer: %d:%d:%d", windowBuffer.width, windowBuffer.height, windowBuffer.stride);
//memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE);
if(windowBuffer.width >= windowBuffer.stride){
//LOGE("1=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height);
memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE);
}else{
//LOGE("2=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height);
// skip the (stride - width) padding bytes at the end of each row
for(int i=0;i<height;++i)
memcpy(windowBuffer.bits + windowBuffer.stride * i * RGB_SIZE
, buffer + width * i * RGB_SIZE
, width * RGB_SIZE);
}
// unlock the window buffer and post it to display
ANativeWindow_unlockAndPost(pWin);
// count number of frames
++i;
}
}
}
}
}
}else if(packet.stream_index==audioStream) {
int ret = avcodec_decode_audio4(codecCtxAudio,decodedFrame, &frameFinished,
&packet);
// LOGD("avcodec_decode_audio4, %d , ret %d" , frameFinished, ret);
if(frameFinished)
{
// LOGD("read audio play");
size_t unpadded_linesize = decodedFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)decodedFrame->format);
/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */
if(NULL!=swr_ctx)
{
int dst_linesize = 0;
int dst_nb_samples =av_rescale_rnd(decodedFrame->nb_samples, decodedFrame->sample_rate, codecCtxAudio->sample_rate, AV_ROUND_UP);
int dst_nb_channels = av_get_channel_layout_nb_channels(codecCtxAudio->channels ==1 ?AV_CH_LAYOUT_MONO:AV_CH_LAYOUT_STEREO);
av_samples_alloc_array_and_samples(&dst_data,&dst_linesize,dst_nb_channels,dst_nb_samples,codecCtxAudio->sample_fmt == AV_SAMPLE_FMT_U8? AV_SAMPLE_FMT_U8:AV_SAMPLE_FMT_S16, 0);
int ret = audio_swr_resampling_audio(swr_ctx,decodedFrame,dst_data);
if(ret>0){
writeAudio(dst_data[0],ret);
//fwrite(dst_data[0], 1, ret, stream);
}
if (dst_data)
{
av_freep(&dst_data[0]);
}
av_freep(&dst_data);
}else{
writeAudio(decodedFrame->extended_data[0], unpadded_linesize);
//fwrite(decodedFrame->extended_data[0], 1, unpadded_linesize, stream);
}
//fwrite(decodedFrame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
LOGD("read audio buffer: %d ,%d", unpadded_linesize, decodedFrame->linesize[0]);
}else{
//LOGD("===read audio buffer: %d", packet.size);
//writeAudio(packet.data, packet.size);
}
}else{
LOGD("unkown stream index: %d", packet.stream_index);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
//fclose(stream);
LOGI("total No. of frames decoded and rendered %d", i);
}

void Media::decodeAndRenderPic(void *pBuffer,int dwBufsize) {
ANativeWindow_Buffer windowBuffer;
AVPacket packet;
int frameFinished;
int lineCnt;
ANativeWindow * pWin;
pWin=window;
ARect rect;
rect.left=0;
rect.top=0;
rect.right = width;
rect.bottom = height;
memset(&packet,0x00,sizeof(AVPacket));
packet.data = (uint8_t*)pBuffer; // pointer to a complete H.264 frame
packet.size = dwBufsize;         // size of the H.264 frame
// Decode video frame
avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished,
&packet);
// Did we get a video frame?
//LOGD("111111111111111111111111");
if(frameFinished && NULL!=window && bInit) {
// Convert the image from its native format to RGBA
sws_scale
(
sws_ctx,
(uint8_t const * const *)decodedFrame->data,
decodedFrame->linesize,
0,
codecCtx->height,
frameRGBA->data,
frameRGBA->linesize
);
//LOGD("22222222222222222222222222222");
synchronized(lockWindow)
{
if(NULL!=window)
{
// lock the window buffer
if (ANativeWindow_lock(pWin, &windowBuffer, &rect) < 0) {
LOGE("cannot lock window");
} else {
//LOGD("333333333333333333333333333");
// draw the frame on buffer
LOGD("copy buffer %d:%d:%d lineSize:%d", width, height, width*height*RGB_SIZE, frameRGBA->linesize[0]);
LOGD("RECT : %d,%d,%d,%d",rect.left,rect.top,rect.right,rect.bottom);
//LOGD("window buffer: %d:%d:%d", windowBuffer.width,windowBuffer.height, windowBuffer.stride);
if(windowBuffer.width >= windowBuffer.stride){
//LOGE("1=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height);
memcpy(windowBuffer.bits, buffer, width * height * RGB_SIZE);
}else{
//LOGE("2=========windowBuffer: %d,%d,%d,%d", windowBuffer.format,windowBuffer.stride,windowBuffer.width,windowBuffer.height);
// skip the (stride - width) padding bytes at the end of each row
for(int i=0;i<height;++i)
memcpy(windowBuffer.bits + windowBuffer.stride * i * RGB_SIZE
, buffer + width * i * RGB_SIZE
, width * RGB_SIZE);
}
//LOGD("666666666666666666666666666");
// unlock the window buffer and post it to display
ANativeWindow_unlockAndPost(pWin);
// count number of frames
//SaveFrame(pEnv, bitmap, codecCtx->width, codecCtx->height, i);
//stop = 1;
}
}
}
}
//LOGD("44444444444444444444444");
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
//LOGD("5555555555555555555555555");
}

void Media::decodeAudioAndPlay(void *pBuffer,int dwBufsize) {
AVPacket packet;
int frameFinished;
LOGD("decodeAudioAndPlay start");
if(NULL == codecCtxAudio)
{
LOGD("codecCtxAudio not init!");
return;
}
memset(&packet,0x00,sizeof(AVPacket));
packet.data = (uint8_t*)pBuffer; // pointer to a complete encoded audio frame
packet.size = dwBufsize;         // size of the encoded audio frame
// Decode audio frame
int ret = avcodec_decode_audio4(codecCtxAudio,decodedFrame, &frameFinished,
&packet);
LOGD("avcodec_decode_audio4, %d , ret %d" , frameFinished, ret);
// Did we get an audio frame?
if(frameFinished && bInit) {
size_t unpadded_linesize = decodedFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)decodedFrame->format);
writeAudio(decodedFrame->extended_data[0], unpadded_linesize);
LOGD("writeAudio");
}else{
LOGD("writeAudio fail!");
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
LOGD("decodeAudioAndPlay end");
}

Audio.h — the audio part is written as a singleton... not much encapsulation. It uses Java's AudioTrack; a native audio API was not considered because the required .so files differ across Android versions.

#pragma once

void initAudio(int mhz=44100,bool bMono=false,bool b16Bit=true);
void writeAudio(void * buffer,int size);
void releaseAudio();
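
For reference, this is roughly how Media.cpp drives these three functions from its decode thread — a sketch of the call sequence only, where pcmBuffer/pcmSize are placeholders for the (possibly resampled) PCM produced per decoded frame:

// Configure the AudioTrack once from the audio codec context:
initAudio(codecCtxAudio->sample_rate,
          codecCtxAudio->channels == 1,                   // mono?
          codecCtxAudio->sample_fmt != AV_SAMPLE_FMT_U8); // 16-bit?

// For each decoded (and resampled, if needed) frame:
writeAudio(pcmBuffer, pcmSize); // queues PCM; the audio thread feeds AudioTrack

// When playback ends:
releaseAudio();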

Audio.cpp

#include "media/Audio.h"
#include <jni.h>
#include "cu.h"
#include "utils/Log.h"
#include "utils/sharedptr.h"
#include "utils/Lock.h"
#include <vector>
#include <pthread.h>
#include <sys/syscall.h>
#include <sys/linux-syscalls.h>
#define SYS_gettid __NR_gettid
#define BUFFER_SIZE 1024*20

static bool init = false;
static jbyteArray buffer;
static jobject audio_track;
static jint buffer_size;
static jmethodID method_write;

using std::vector;

struct ElementBuf{ sharedptr<jbyte> buf; int size; };
typedef sharedptr<ElementBuf> SE;
static vector<SE> _vector;
static Mutex mutex(true);
static Mutex mutexVistor;
static bool _stop = true;

void* audioThread(void *params);

SE pop()
{
mutex.lock();
synchronized (mutexVistor)
{
if(!_vector.empty())
{
vector<SE>::iterator iter =_vector.begin();
SE e=*iter;
_vector.erase(iter);
return e;
}
}
return pop();
}

void push(SE e)
{
synchronized (mutexVistor)
{
_vector.push_back(e);
}
mutex.unlock();
}

void releaseAudioRes()
{
LOGD("releaseAudioRes start");
JNIEnv * pEnv = getEnv();
jclass audio_track_cls = pEnv->FindClass("android/media/AudioTrack");
// audio.stop();
//audio.release();
jmethodID method_stop =pEnv->GetMethodID(audio_track_cls, "stop",
"()V");
jmethodID method_release =pEnv->GetMethodID(audio_track_cls, "release",
"()V");
pEnv->CallVoidMethod(audio_track, method_stop);
pEnv->CallVoidMethod(audio_track, method_release);
pEnv->DeleteGlobalRef(audio_track);
audio_track=NULL;
LOGD("releaseAudioRes end");
}

int g_oldMhz = 0;
int g_oldbMono = false;
int g_oldb16Bit = true;

void initAudio(int mhz,bool bMono,bool b16Bit)
{
LOGD("initAudio, %d ,%d, %d",mhz,bMono,b16Bit);
_stop=false;
if(init)
{
if(g_oldMhz!=mhz||g_oldbMono!=bMono||g_oldb16Bit!=b16Bit)
{
releaseAudioRes();
}else{
return;
}
}
g_oldMhz=mhz;
g_oldbMono=bMono;
g_oldb16Bit=b16Bit;
JNIEnv * pEnv = getEnv();
jclass audio_track_cls = pEnv->FindClass("android/media/AudioTrack");
jmethodID min_buff_size_id = pEnv->GetStaticMethodID(audio_track_cls,"getMinBufferSize", "(III)I");
buffer_size =pEnv->CallStaticIntMethod(audio_track_cls,min_buff_size_id,
mhz, //44100, 22050 and 11025.
bMono?2:0x4|0x8, //0x4|0x8,//2, /*CHANNEL_CONFIGURATION_MONO*/
2); /*ENCODING_PCM_16BIT*/
LOGI("buffer_size=%i",buffer_size);
buffer =pEnv->NewByteArray(BUFFER_SIZE);//buffer_size/4
buffer = (jbyteArray)pEnv->NewGlobalRef(buffer);
jmethodID constructor_id =pEnv->GetMethodID(audio_track_cls, "<init>",
"(IIIIII)V"); audio_track =pEnv->NewObject(audio_track_cls,
constructor_id,
3, /*AudioManager.STREAM_MUSIC*/
mhz, //11025, /*sampleRateInHz*/ 44100, 22050 and 11025.
bMono? 2:0x4|0x8,//0x4|0x8,//2, /*CHANNEL_CONFIGURATION_MONO*/
b16Bit?2:3, /*ENCODING_PCM_16BIT*/
buffer_size, /*bufferSizeInBytes*/
1 /*AudioTrack.MODE_STREAM*/
);
audio_track = (jobject)pEnv->NewGlobalRef(audio_track);
//setvolume
LOGD("setStereoVolume 1");
jmethodID setStereoVolume =pEnv->GetMethodID(audio_track_cls,"setStereoVolume","(FF)I");
pEnv->CallIntMethod(audio_track,setStereoVolume,1.0,1.0);
LOGD("setStereoVolume 2");
//play
jmethodID method_play =pEnv->GetMethodID(audio_track_cls, "play",
"()V");
pEnv->CallVoidMethod(audio_track, method_play);
//write
method_write =pEnv->GetMethodID(audio_track_cls,"write","([BII)I");
//method_write = (jmethodID)pEnv->NewGlobalRef(method_write);
LOGI("initAudio OK, BufferSize/4:%d",buffer_size/4 );
static pthread_t thread=NULL;
if(NULL==thread)
pthread_create(&thread, NULL, audioThread, NULL);
init = true;
}

void* audioThread(void *params)
{
LOGW("create thread : %d Audio.cpp audioThread", syscall(SYS_gettid));
AttachCurrentThread();
JNIEnv * env = getEnv();
while(true)
{
SE e = pop();
if(_stop)
continue;
int size = e->size;
int writeSize = 0;
jbyte * buf= e->buf.get();
while(size > BUFFER_SIZE)
{
// LOGD("writeAudio , BufferSize/4:%d",BUFFER_SIZE );
env->SetByteArrayRegion(buffer, 0, BUFFER_SIZE, buf + writeSize);
//LOGD("writeAudio , ==========" );
env->CallIntMethod(audio_track, method_write, buffer, 0, BUFFER_SIZE); // write() returns int
writeSize += BUFFER_SIZE;
size -= BUFFER_SIZE;
}
if(size>0)
{
//LOGD("writeAudio , size:%d",size );
env->SetByteArrayRegion(buffer, 0, size, buf + writeSize);
env->CallIntMethod(audio_track, method_write, buffer, 0, size); // write() returns int
}
//LOGD("writeAudio , OK! size:%d",e->size );
}
DetachCurrentThread();
return NULL;
}

void writeAudio(void * buf,int size)
{
sharedptr<jbyte> b(new jbyte[size]);
memcpy(b.get(),buf,size);
ElementBuf *eb =new ElementBuf();
eb->buf = b;
eb->size = size;
SE e(eb);
push(e);
}

void releaseAudio()
{
_stop = true;
}

Other related code:

bool AttachCurrentThread()
{
LOGI("AttachCurrentThread ing");
JNIEnv * env;
int status = 0;
env = getEnv();
if(NULL==env){
int ret = g_jvm->AttachCurrentThread(&env, NULL);
LOGI("AttachCurrentThread ok");
return ret>=0;
}
LOGW("AttachCurrentThread fail, thread is attached");
return false;
}
void DetachCurrentThread()
{
LOGI("DetachCurrentThread ing");
if(NULL!=getEnv())
g_jvm->DetachCurrentThread();
LOGI("DetachCurrentThread ok");
}
JNIEnv * getEnv()
{
	JNIEnv* env;
	if (g_jvm->GetEnv((void **)&env, JNI_VERSION_1_6) != JNI_OK) {
		return NULL;
	}
	return env;
}
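
One piece not shown in the post is where g_jvm comes from. Assuming the usual pattern, it is cached in JNI_OnLoad when the library is loaded — a minimal sketch:

#include <jni.h>

JavaVM *g_jvm = NULL; // cached VM pointer used by getEnv()/AttachCurrentThread()

extern "C" jint JNI_OnLoad(JavaVM *vm, void *reserved)
{
    // Cache the JavaVM so worker threads can attach/detach themselves later.
    g_jvm = vm;
    return JNI_VERSION_1_6; // same version getEnv() requests
}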

The project's Android.mk. I include the libspeex library here because I use it; if you don't need it you can leave it out.

LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

LOCAL_MODULE    := cu
#LOCAL_SRC_FILES := cu.cpp
FILE_LIST := $(wildcard $(LOCAL_PATH)/*.cpp)
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%)
FILE_LIST := $(wildcard $(LOCAL_PATH)/*.c)
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%)
FILE_LIST := $(wildcard $(LOCAL_PATH)/*/*.cpp)
LOCAL_SRC_FILES += $(FILE_LIST:$(LOCAL_PATH)/%=%)

LOCAL_LDLIBS := -llog -ljnigraphics -lz -landroid
LOCAL_SHARED_LIBRARIES := libavformat libavcodec libswscale libavutil libswresample libspeex

include $(BUILD_SHARED_LIBRARY)

$(call import-add-path,$(LOCAL_PATH))
$(call import-add-path,$(LOCAL_PATH)/ffmpeg/arm/include)
$(call import-module, ffmpeg/arm)
$(call import-module, speex)
include $(all-subdir-makefiles)

Application.mk:

APP_ABI := armeabi
#APP_ABI := armeabi-v7a
APP_PLATFORM := android-9
APP_STL := stlport_static
APP_CPPFLAGS += -fexceptions
APP_CFLAGS += -Wno-error=format-security

That's all the code I'll paste for now!

For audio/video sync I only implemented the simplest approach: syncing playback against the video PTS. I found the PTS values don't quite match what online articles describe. The timing logic is distilled in the sketch below.
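
Distilled from decodeAndRender(), the timing amounts to the helper below (a sketch; syncToPts is a hypothetical name, and it assumes pts has already been converted to microseconds, the unit av_gettime() reports):

#include <stdint.h>
#include <unistd.h>
extern "C" {
#include <libavutil/time.h> // av_gettime()
}

static void syncToPts(int64_t pts)
{
    static int64_t baseTime = 0;
    if (pts == 0 || baseTime == 0) {
        baseTime = av_gettime() - pts; // anchor stream time to the wall clock
    } else {
        int64_t waitTime = (baseTime + pts) - av_gettime();
        if (waitTime > 0)
            usleep(waitTime); // the frame is early: sleep until it is due
    }
}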

Audio playback uses Java's AudioTrack.

Copyright notice: this is an original blog post; please do not repost without permission.
