This installment is hands-on: I studied the native Android SimplePlayer and typed out my own version of it, and learned a lot in the process.

MyPlayer.h

#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AString.h>
#include <utils/KeyedVector.h>

namespace android {

class IGraphicBufferProducer;
struct MediaCodec;
class MediaCodecBuffer;
struct NuMediaExtractor;
class Surface;
struct ABuffer;
class AudioTrack;
struct ALooper;

class MyPlayer : public AHandler {
public:
    MyPlayer();

    status_t setDataSource(const char* path);
    status_t setSurface(const sp<IGraphicBufferProducer> &bufferProducer);
    status_t prepare();
    status_t start();
    status_t stop();

protected:
    virtual ~MyPlayer();
    virtual void onMessageReceived(const sp<AMessage> &msg);

private:
    enum {
        kWhatSetDataSource = 0,
        kWhatSetSurface,
        kWhatPrepare,
        kWhatStart,
        kWhatStop,
        kWhatDoMoreStuff,
    };

    enum State {
        UNINITIALIZED,
        UNPREPARED,
        STOPPED,
        STARTED,
    };

    struct BufferInfo {
        size_t mIndex;
        size_t mOffset;
        size_t mSize;
        int64_t mPresentationTimeUs;
        uint32_t mFlags;
    };

    struct CodecState {
        sp<MediaCodec> mCodec;
        Vector<sp<MediaCodecBuffer>> mBuffers[2];
        Vector<sp<ABuffer>> mCSD;
        List<size_t> mAvailInputBufferIndices;
        List<BufferInfo> mAvailOutputBufferInfos;
        sp<AudioTrack> mAudioTrack;
        uint32_t mNumFramesWritten;
    };

    status_t onPrepare();
    status_t onStart();
    status_t onStop();
    status_t onDoMoreStuff();
    status_t onOutputFormatChanged(CodecState *);
    status_t renderAudio(CodecState* state, BufferInfo* info, const sp<MediaCodecBuffer> &buffer);

    AString mPath;
    State mState;
    sp<Surface> mSurface;
    sp<NuMediaExtractor> mExtractor;
    sp<ALooper> mCodecLooper;
    KeyedVector<size_t, CodecState> mStateByTrackIndex;
    int32_t mDoMoreStuffGeneration;
    int64_t mStartTimeRealUs;
};

}  // namespace android

MyPlayer.cpp

//#define LOG_NDEBUG 0
#define LOG_TAG "MyPlayer"

#include "MyPlayer.h"

#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ADebug.h>
#include <gui/Surface.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/MediaCodecBuffer.h>
#include <media/IMediaHTTPService.h>
#include <mediadrm/ICrypto.h>
#include <media/AudioTrack.h>

namespace android {

status_t PostAndAwaitResponse(const sp<AMessage> &msg, sp<AMessage> *response)
{
    status_t err = msg->postAndAwaitResponse(response);
    if (err != OK)
        return err;

    if (!(*response)->findInt32("err", &err))
    {
        err = OK;
    }
    return err;
}

MyPlayer::MyPlayer()
    : mState(UNINITIALIZED),
      mDoMoreStuffGeneration(0),
      mStartTimeRealUs(-1ll)
{
}

MyPlayer::~MyPlayer()
{
}

status_t MyPlayer::setDataSource(const char* path)
{
    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
    msg->setString("path", path);

    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}

status_t MyPlayer::setSurface(const sp<IGraphicBufferProducer> &bufferProducer)
{
    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);

    sp<Surface> surface;
    if (bufferProducer != NULL)
        surface = new Surface(bufferProducer);
    msg->setObject("surface", surface);

    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}

status_t MyPlayer::prepare()
{
    sp<AMessage> msg = new AMessage(kWhatPrepare, this);
    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}

status_t MyPlayer::start()
{
    sp<AMessage> msg = new AMessage(kWhatStart, this);
    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}

status_t MyPlayer::stop()
{
    sp<AMessage> msg = new AMessage(kWhatStop, this);
    sp<AMessage> response;
    return PostAndAwaitResponse(msg, &response);
}

void MyPlayer::onMessageReceived(const sp<AMessage> &msg)
{
    switch (msg->what())
    {
        case kWhatSetDataSource:
        {
            status_t err;
            if (mState != UNINITIALIZED)
                err = INVALID_OPERATION;
            else {
                CHECK(msg->findString("path", &mPath));
                mState = UNPREPARED;
                err = OK;
            }

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatSetSurface:
        {
            status_t err;
            if (mState != UNPREPARED)
                err = INVALID_OPERATION;
            else {
                sp<RefBase> obj;
                CHECK(msg->findObject("surface", &obj));
                mSurface = static_cast<Surface*>(obj.get());
                err = OK;
            }

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatPrepare:
        {
            status_t err;
            if (mState != UNPREPARED)
                err = INVALID_OPERATION;
            else {
                err = onPrepare();
                if (err == OK)
                    mState = STOPPED;
            }

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatStart:
        {
            status_t err = OK;
            if (mState == UNPREPARED) {
                err = onPrepare();
                if (err == OK)
                    mState = STOPPED;
            }

            if (err == OK)
            {
                if (mState != STOPPED)
                    err = INVALID_OPERATION;
                else {
                    err = onStart();
                    if (err == OK)
                        mState = STARTED;
                }
            }

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatStop:
        {
            status_t err;
            if (mState != STARTED)
                err = INVALID_OPERATION;
            else {
                err = onStop();
                if (err == OK)
                    mState = STOPPED;
            }

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }
        case kWhatDoMoreStuff:
        {
            int32_t generation;
            CHECK(msg->findInt32("generation", &generation));
            if (generation != mDoMoreStuffGeneration)
                break;

            status_t err = onDoMoreStuff();
            // Keep pumping the decode/render loop every 10ms while things are OK.
            if (err == OK)
                msg->post(10000ll);
            break;
        }
        default:
            break;
    }
}

status_t MyPlayer::onPrepare()
{
    CHECK_EQ(mState, UNPREPARED);

    // Create the NuMediaExtractor and point it at the file.
    mExtractor = new NuMediaExtractor();
    status_t err = mExtractor->setDataSource(NULL, mPath.c_str());
    if (err != OK)
    {
        mExtractor.clear();
        return err;
    }

    if (mCodecLooper == NULL)
    {
        mCodecLooper = new ALooper;
        mCodecLooper->start();
    }

    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < mExtractor->countTracks(); i++)
    {
        sp<AMessage> format;
        status_t err = mExtractor->getTrackFormat(i, &format);
        CHECK_EQ(err, (status_t)OK);

        AString mime;
        CHECK(format->findString("mime", &mime));

        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
        if (!haveAudio && !strncasecmp(mime.c_str(), "audio/", 6))
            haveAudio = true;
        else if (!haveVideo && isVideo)
            haveVideo = true;
        else
            continue;

        // Select the track (this is where the track's IMediaSource is obtained internally).
        err = mExtractor->selectTrack(i);
        CHECK_EQ(err, (status_t)OK);

        // Create one CodecState per audio/video track to hold everything related
        // to its MediaCodec.
        // KeyedVector usage: add() takes (key, value) and returns the index of
        // the newly added element; editValueAt() looks up the value at a given
        // index; keyAt() returns the key at a given index; editValueFor() looks
        // up the value for a given key.
        CodecState* state = &mStateByTrackIndex.editValueAt(mStateByTrackIndex.add(i, CodecState()));

        state->mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false);
        state->mNumFramesWritten = 0;  // only used for audio
        CHECK(state->mCodec != NULL);

        // Configure the MediaCodec; only the video track gets the Surface.
        err = state->mCodec->configure(format, isVideo ? mSurface : NULL, NULL, 0);
        CHECK_EQ(err, OK);

        // Check whether the track format carries codec-specific data ("csd-0",
        // "csd-1", ...); AStringPrintf builds the key string.
        size_t j = 0;
        sp<ABuffer> buffer;
        while (format->findBuffer(AStringPrintf("csd-%zu", j).c_str(), &buffer)) {
            state->mCSD.push_back(buffer);
            j++;
        }
    }

    for (size_t i = 0; i < mStateByTrackIndex.size(); i++)
    {
        ALOGD("mStateByTrackIndex[%zu].key = %zu", i, mStateByTrackIndex.keyAt(i));
    }

    for (size_t i = 0; i < mStateByTrackIndex.size(); i++)
    {
        CodecState* state = &mStateByTrackIndex.editValueAt(i);

        // Start the MediaCodec first; its buffers are only allocated after start().
        status_t err = state->mCodec->start();
        CHECK_EQ(err, (status_t)OK);

        // Cache both tracks' input and output buffer arrays in the CodecState.
        err = state->mCodec->getInputBuffers(&state->mBuffers[0]);
        CHECK_EQ(err, (status_t)OK);

        err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
        CHECK_EQ(err, (status_t)OK);

        // If there are CSD buffers, they must be fed to the decoder first.
        for (size_t j = 0; j < state->mCSD.size(); ++j)
        {
            const sp<ABuffer> &srcBuffer = state->mCSD.itemAt(j);

            // Block until an input buffer index becomes available.
            size_t index;
            err = state->mCodec->dequeueInputBuffer(&index, -1ll);
            CHECK_EQ(err, (status_t)OK);

            const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);

            // Make sure the destination buffer is large enough.
            // For ABuffer: size() is the amount of valid data, capacity() the
            // maximum capacity, offset() the start of the readable region.
            CHECK_LE(srcBuffer->size(), dstBuffer->capacity());
            dstBuffer->setRange(0, srcBuffer->size());
            memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());

            // Queue the buffer to the decoder; since this is CSD, the
            // BUFFER_FLAG_CODECCONFIG flag must be set.
            err = state->mCodec->queueInputBuffer(index, 0, dstBuffer->size(), 0ll, MediaCodec::BUFFER_FLAG_CODECCONFIG);
            CHECK_EQ(err, (status_t)OK);
        }
    }

    return OK;
}

status_t MyPlayer::onStart()
{
    CHECK_EQ(mState, STOPPED);

    // After start(), keep posting kWhatDoMoreStuff so that onDoMoreStuff() runs
    // over and over again.
    sp<AMessage> msg = new AMessage(kWhatDoMoreStuff, this);
    msg->setInt32("generation", ++mDoMoreStuffGeneration);
    msg->post();
    return OK;
}

status_t MyPlayer::onStop()
{
    return OK;
}

status_t MyPlayer::onDoMoreStuff()
{
    // 1. For each track, collect whatever input and output buffers the codec
    //    currently has available.
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++)
    {
        CodecState *state = &mStateByTrackIndex.editValueAt(i);

        status_t err;

        // Non-blocking poll: collect every input buffer that is ready.
        do {
            size_t index;
            err = state->mCodec->dequeueInputBuffer(&index);
            if (err == OK)
            {
                // Remember the index for later use.
                state->mAvailInputBufferIndices.push_back(index);
            }
            else
            {
            }
        } while (err == OK);

        // Same non-blocking poll for output buffers.
        do {
            BufferInfo info;
            // dequeueOutputBuffer() returns more than an index: it also fills in
            // the buffer's offset, size, PTS and flags. Its return value can
            // carry status information, so what comes back is not necessarily
            // decoded data; it may be a notification from the decoder.
            err = state->mCodec->dequeueOutputBuffer(&info.mIndex, &info.mOffset, &info.mSize, &info.mPresentationTimeUs, &info.mFlags);

            if (err == OK)
            {
                // Decoded data: queue the BufferInfo for later processing.
                state->mAvailOutputBufferInfos.push_back(info);
            }
            else if (err == INFO_FORMAT_CHANGED)
            {
                // INFO_FORMAT_CHANGED means the output format changed.
                // onOutputFormatChanged() is where the AudioTrack gets created.
                // (I think the AudioTrack could also be created when the audio
                // track is identified via NuMediaExtractor::getTrackFormat().)
                err = onOutputFormatChanged(state);
                CHECK_EQ(err, (status_t)OK);
            }
            else if (err == INFO_OUTPUT_BUFFERS_CHANGED)
            {
                // INFO_OUTPUT_BUFFERS_CHANGED means the output buffers changed,
                // so the output buffer array has to be fetched again.
                err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
                CHECK_EQ(err, (status_t)OK);
            }
            else
            {
            }
        } while (err == OK || err == INFO_FORMAT_CHANGED || err == INFO_OUTPUT_BUFFERS_CHANGED);
    }

    // 2. Fill input buffers. Unlike step 1 this does not iterate over all
    //    tracks; the file is read in sample order, and the loop exits as soon
    //    as the relevant track has no free input buffer left.
    for (;;)
    {
        size_t trackIndex;
        // Find out which track the next sample belongs to; a non-OK return
        // value means we reached EOS.
        status_t err = mExtractor->getSampleTrackIndex(&trackIndex);
        if (err != OK) {
            ALOGD("get input EOS");
            break;
        }
        else {
            // Look up the CodecState for that track.
            CodecState *state = &mStateByTrackIndex.editValueFor(trackIndex);

            // If this CodecState has no free input buffer, stop for now.
            if (state->mAvailInputBufferIndices.empty())
                break;

            // Take one index off the list.
            size_t index = *state->mAvailInputBufferIndices.begin();
            state->mAvailInputBufferIndices.erase(state->mAvailInputBufferIndices.begin());

            // Input and output buffers are MediaCodecBuffers; base() returns the
            // pointer to the underlying storage.
            const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
            sp<ABuffer> abuffer = new ABuffer(dstBuffer->base(), dstBuffer->capacity());

            // Read the sample straight into the MediaCodecBuffer's storage.
            err = mExtractor->readSampleData(abuffer);
            CHECK_EQ(err, (status_t)OK);

            // Mark the readable range.
            dstBuffer->setRange(abuffer->offset(), abuffer->size());

            // Fetch the sample's PTS.
            int64_t timeUs;
            CHECK_EQ(mExtractor->getSampleTime(&timeUs), OK);

            // Queue the buffer to the decoder with its PTS and no flags.
            err = state->mCodec->queueInputBuffer(index, dstBuffer->offset(), dstBuffer->size(), timeUs, 0);
            CHECK_EQ(err, (status_t)OK);

            ALOGD("enqueue input data on track %zu", trackIndex);

            // Advance the extractor to the next sample.
            err = mExtractor->advance();
            CHECK_EQ(err, (status_t)OK);
        }
    }

    int64_t nowUs = ALooper::GetNowUs();

    // Initialize the playback start time the first time through.
    if (mStartTimeRealUs < 0ll)
    {
        mStartTimeRealUs = nowUs + 1000000ll;
    }

    // 3. Process output buffers and do A/V sync, iterating over all tracks.
    for (size_t i = 0; i < mStateByTrackIndex.size(); i++)
    {
        CodecState *state = &mStateByTrackIndex.editValueAt(i);

        // Work through all pending output buffers in one pass.
        while (!state->mAvailOutputBufferInfos.empty()) {
            BufferInfo *info = &*state->mAvailOutputBufferInfos.begin();

            // Compute the real (wall-clock) presentation time.
            int64_t whenRealUs = info->mPresentationTimeUs + mStartTimeRealUs;
            // Distance between the current system time and that presentation time.
            int64_t lateByUs = nowUs - whenRealUs;

            // Render once we are within 10ms of the presentation time.
            if (lateByUs > -10000ll)
            {
                bool release = true;

                // If we are already more than 30ms past the presentation time,
                // drop the frame.
                if (lateByUs > 30000ll)
                {
                    ALOGD("track %zu, buffer late by %lld us, dropping", mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                    // Release without rendering.
                    state->mCodec->releaseOutputBuffer(info->mIndex);
                }
                else {
                    // Audio path.
                    if (state->mAudioTrack != NULL)
                    {
                        // Fetch the output buffer.
                        const sp<MediaCodecBuffer> &srcBuffer = state->mBuffers[1].itemAt(info->mIndex);
                        // Write the data to the AudioTrack; it may not all be
                        // written in one call.
                        renderAudio(state, info, srcBuffer);
                        // mSize tells us whether everything was written; only
                        // release once the buffer is fully consumed.
                        if (info->mSize > 0)
                            release = false;
                    }

                    // Render: since we release-and-render only when the
                    // presentation time has arrived, no timestamp is passed here.
                    if (release)
                        state->mCodec->renderOutputBufferAndRelease(info->mIndex);
                }

                // Remove the entry from the pending list.
                if (release) {
                    state->mAvailOutputBufferInfos.erase(state->mAvailOutputBufferInfos.begin());
                    info = NULL;
                }
                else
                    break;
            } else {
                // Still more than 10ms early, so don't render yet.
                ALOGD("track %zu buffer early by %lld us", mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                break;
            }
        }
    }

    return OK;
}

status_t MyPlayer::onOutputFormatChanged(CodecState *state)
{
    sp<AMessage> format;
    // getOutputFormat() returns the format reported by the decoder.
    status_t err = state->mCodec->getOutputFormat(&format);
    if (err != OK)
        return err;

    AString mime;
    CHECK(format->findString("mime", &mime));

    if (!strncasecmp(mime.c_str(), "audio/", 6))
    {
        // Read the channel count and sample rate.
        int32_t channelCount;
        int32_t sampleRate = 0;
        CHECK(format->findInt32("channel-count", &channelCount));
        CHECK(format->findInt32("sample-rate", &sampleRate));

        // Create an AudioTrack for playback.
        state->mAudioTrack = new AudioTrack(AUDIO_STREAM_MUSIC, sampleRate, AUDIO_FORMAT_PCM_16_BIT, audio_channel_out_mask_from_count(channelCount), 0);
    }

    return OK;
}

status_t MyPlayer::renderAudio(CodecState* state, BufferInfo* info, const sp<MediaCodecBuffer> &buffer)
{
    CHECK(state->mAudioTrack != NULL);

    // Start the AudioTrack if it isn't running yet.
    if (state->mAudioTrack->stopped())
        state->mAudioTrack->start();

    // Number of frames the AudioTrack has already played.
    uint32_t numFramesPlayed;
    CHECK_EQ(state->mAudioTrack->getPosition(&numFramesPlayed), OK);

    // Number of frames we can still write.
    uint32_t numFramesAvailableToWrite = state->mAudioTrack->frameCount() - (state->mNumFramesWritten - numFramesPlayed);

    // Convert that into writable bytes.
    size_t numBytesAvailableToWrite = numFramesAvailableToWrite * state->mAudioTrack->frameSize();

    size_t copy = info->mSize;
    if (copy > numBytesAvailableToWrite)
        copy = numBytesAvailableToWrite;

    if (copy == 0)
        return OK;

    int64_t startTimeUs = ALooper::GetNowUs();

    // Write the data; note the offset, since the previous call may not have
    // written everything.
    ssize_t nbytes = state->mAudioTrack->write(buffer->base() + info->mOffset, copy);
    CHECK_EQ(nbytes, (ssize_t)copy);

    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;

    // Number of frames written this time.
    uint32_t numFramesWritten = nbytes / state->mAudioTrack->frameSize();

    if (delayUs > 2000ll) {
        ALOGD("AudioTrack write took %lld us", (long long)delayUs);
    }

    // Update offset and size so the caller can tell whether all data has been
    // written.
    info->mOffset += nbytes;
    info->mSize -= nbytes;

    // Track the total number of frames written.
    state->mNumFramesWritten += numFramesWritten;

    return OK;
}

}  // namespace android
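As an aside, the KeyedVector calls used in onPrepare() boil down to the pattern below; a minimal standalone sketch (the int payload is only for illustration, not the CodecState used above):

#include <utils/KeyedVector.h>

void keyedVectorDemo()
{
    android::KeyedVector<size_t, int> kv;

    // add() inserts a (key, value) pair and returns the index it was stored at.
    ssize_t idx = kv.add(3 /* key */, 100 /* value */);

    kv.editValueAt(idx) = 200;    // modifiable value looked up by index
    size_t key = kv.keyAt(idx);   // key stored at that index
    kv.editValueFor(key) = 300;   // modifiable value looked up by key
}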

TestPlayer.cpp

//#define LOG_NDEBUG 0
#define LOG_TAG "TestPlayer"

#include "MyPlayer.h"

#include <gui/SurfaceControl.h>
#include <gui/SurfaceComposerClient.h>
#include <gui/Surface.h>
#include <ui/DisplayConfig.h>
#include <media/stagefright/foundation/ALooper.h>
#include <binder/IBinder.h>
#include <media/stagefright/foundation/ADebug.h>
#include <climits>
#include <unistd.h>

int main(int argc, char** argv)
{
    using namespace android;

    sp<MyPlayer> player = new MyPlayer();
    sp<android::ALooper> looper = new android::ALooper();
    // Create a looper to handle MyPlayer's messages.
    looper->registerHandler(player);
    // The looper runs on its own thread.
    looper->start();

    // Create a Surface.
    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
    CHECK_EQ(composerClient->initCheck(), (status_t)OK);

    const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
    CHECK(display != NULL);

    DisplayConfig config;
    CHECK_EQ(SurfaceComposerClient::getActiveDisplayConfig(display, &config), NO_ERROR);

    const ui::Size &resolution = config.resolution;
    const ssize_t displayWidth = resolution.getWidth();
    const ssize_t displayHeight = resolution.getHeight();
    ALOGD("display is %zd x %zd\n", displayWidth, displayHeight);

    sp<SurfaceControl> control = composerClient->createSurface(String8("A Surface"), displayWidth / 2, displayHeight / 2, PIXEL_FORMAT_RGB_565, 0);
    CHECK(control != NULL);
    CHECK(control->isValid());

    SurfaceComposerClient::Transaction{}.setLayer(control, INT_MAX).show(control).apply();

    sp<Surface> surface = control->getSurface();
    CHECK(surface != NULL);

    // Start playback.
    player->setDataSource(argv[1]);
    player->setSurface(surface->getIGraphicBufferProducer());
    player->start();

    // Let playback run for 60 seconds.
    sleep(60);

    composerClient->dispose();
    looper->stop();
    return 0;
}

Android.bp

cc_binary {
    name: "MyPlayer",

    srcs: [
        "TestPlayer.cpp",
        "MyPlayer.cpp",
    ],

    local_include_dirs: [
        "include",
    ],

    shared_libs: [
        "libstagefright",
        "libmedia",
        "libstagefright_foundation",
        "libgui",
        "libaudioclient",
        "liblog",
        "libutils",
        "libcutils",
        "libmedia_omx",
        "libui",
    ],

    /*export_include_dirs: [
        "include"
    ],*/

    export_shared_lib_headers: [
        "libstagefright",
        "libmedia",
    ],

    header_libs: [
        "libmediametrics_headers",
        "libmediadrm_headers",
        "libaudioclient_headers",
    ],
}

A few issues I ran into while building:

1. local_include_dirs and export_include_dirs are both lists, so their contents must be wrapped in [].

2. export_include_dirs is not allowed in a cc_binary, because a binary has no headers to export.

3. main() must not be wrapped in namespace android, otherwise the main symbol cannot be found. Instead, put using namespace android inside main(); ALooper still needs explicit namespace qualification (see the first sketch below).

4. There are also errors that can show up when a header include is replaced with a bare class/struct forward declaration (see the second sketch below).
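A minimal sketch of point 3, purely illustrative (the real entry point is TestPlayer.cpp above):

#include <media/stagefright/foundation/ALooper.h>

// main() stays in the global namespace; android names are pulled in with a
// using-directive inside the function instead of wrapping main() in
// namespace android {}.
int main()
{
    using namespace android;
    sp<android::ALooper> looper = new android::ALooper();  // ALooper still needs explicit qualification
    looper->start();
    looper->stop();
    return 0;
}

And a sketch of the forward-declaration pitfalls behind point 4 (the exact compiler message isn't recorded in this post, so treat these as the two typical failure modes rather than the author's exact error):

namespace android {

// 1. The tag should match the real definition: MediaCodec is declared with the
//    struct keyword (which is why MyPlayer.h forward-declares it as
//    "struct MediaCodec;"); forward-declaring it as "class MediaCodec;" can
//    trigger clang's -Wmismatched-tags warning.
struct MediaCodec;

// 2. A forward-declared type stays incomplete: only pointers, references and
//    sp<> members can be declared against it; anything that needs its size or
//    members requires the full header in the .cpp.
struct Holder {
    MediaCodec *codec;    // fine with just the forward declaration
    // MediaCodec value;  // error: field has incomplete type
};

}  // namespace android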
