static int ffplay_video_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    ...
    for (;;) {
        ret = get_video_frame(ffp, frame);
        ...
        ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
        av_frame_unref(frame);
#if CONFIG_AVFILTER
    }
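
The elided code above converts the decoder's timestamps into seconds before the frame is queued. A minimal sketch of that step, following the upstream ffplay logic (tb and frame_rate are declared earlier in the real function):

    AVRational tb = is->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
    /* one frame lasts den/num seconds; pts is converted from tb units */
    double duration = (frame_rate.num && frame_rate.den ?
                       av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
    double pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);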
    ...
    /* alloc or resize hardware picture buffer */
    if (!vp->bmp || !vp->allocated ||
        vp->width  != src_frame->width ||
        vp->height != src_frame->height ||
        vp->format != src_frame->format) {

        if (vp->width != src_frame->width || vp->height != src_frame->height)
            ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, src_frame->width, src_frame->height);

        vp->allocated = 0;
        vp->width  = src_frame->width;
        vp->height = src_frame->height;
        vp->format = src_frame->format;

        /* the allocation must be done in the main thread to avoid
           locking problems. */
        alloc_picture(ffp, src_frame->format);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_VoutLockYUVOverlay(vp->bmp);

#ifdef FFP_MERGE
#if CONFIG_AVFILTER
        // FIXME use direct rendering
        av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
                      src_frame->format, vp->width, vp->height);
#else
        // sws_getCachedContext(...);
#endif
#endif
        // FIXME: set swscale options
        if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
            exit(1);
        }
        }
    ...

static void alloc_picture(FFPlayer *ffp, int frame_format)
{
    VideoState *is = ffp->is;
    Frame *vp;
#ifdef FFP_MERGE
    int sdl_format;
#endif

    vp = &is->pictq.queue[is->pictq.windex];

    free_picture(vp);

#ifdef FFP_MERGE
    video_open(is, vp);
#endif

    SDL_VoutSetOverlayFormat(ffp->vout, ffp->overlay_format);
    vp->bmp = SDL_Vout_CreateOverlay(vp->width, vp->height,
                                     frame_format,
                                     ffp->vout);
    ...

int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
    if (!overlay || !overlay->func_fill_frame)
        return -1;

    return overlay->func_fill_frame(overlay, frame);
}

static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
{
    VideoState *is = ffp->is;
    int got_picture;

    ffp_video_statistic_l(ffp);

    if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
        return -1;
    ...
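
The elided tail of get_video_frame is where late frames are dropped before they ever reach the picture queue. Condensed from the upstream ffplay logic (ijkplayer layers drop statistics and a continuous-drop cap on top of this):

    if (got_picture) {
        double dpts = NAN;

        if (frame->pts != AV_NOPTS_VALUE)
            dpts = av_q2d(is->video_st->time_base) * frame->pts;

        if (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
            if (frame->pts != AV_NOPTS_VALUE) {
                double diff = dpts - get_master_clock(is);
                if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
                    diff - is->frame_last_filter_delay < 0 &&
                    is->viddec.pkt_serial == is->vidclk.serial &&
                    is->videoq.nb_packets) {
                    is->frame_drops_early++;
                    av_frame_unref(frame);
                    got_picture = 0;
                }
            }
        }
    }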

On the display side, the picture queue is consumed at its read index, rindex:

/* called to display each frame */
static void video_refresh(FFPlayer *opaque, double *remaining_time)
{
    ...
display:
    /* display picture */
    if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
        video_display2(ffp);
    ...

/* display the current picture, if any */
static void video_display2(FFPlayer *ffp)
{
    VideoState *is = ffp->is;

    if (is->video_st)
        video_image_display2(ffp);
}

static void video_image_display2(FFPlayer *ffp)
{
    VideoState *is = ffp->is;
    Frame *vp;
    Frame *sp = NULL;

    vp = frame_queue_peek_last(&is->pictq);

    int latest_seek_load_serial = __atomic_exchange_n(&(is->latest_seek_load_serial), -1, memory_order_seq_cst);
    if (latest_seek_load_serial == vp->serial)
        ffp->stat.latest_seek_load_duration = (av_gettime() - is->latest_seek_load_start_at) / 1000;

    if (vp->bmp) {
        if (is->subtitle_st) {
            if (frame_queue_nb_remaining(&is->subpq) > 0) {
                sp = frame_queue_peek(&is->subpq);

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    if (!sp->uploaded) {
                        if (sp->sub.num_rects > 0) {
                            char buffered_text[4096];
                            if (sp->sub.rects[0]->text) {
                                strncpy(buffered_text, sp->sub.rects[0]->text, 4096);
                            }
                            else if (sp->sub.rects[0]->ass) {
                                parse_ass_subtitle(sp->sub.rects[0]->ass, buffered_text);
                            }
                            ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, buffered_text, sizeof(buffered_text));
                        }
                        sp->uploaded = 1;
                    }
                }
            }
        }
        SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
        ffp->stat.vfps = SDL_SpeedSamplerAdd(&ffp->vfps_sampler, FFP_SHOW_VFPS_FFPLAY, "vfps[ffplay]");
        if (!ffp->first_video_frame_rendered) {
            ffp->first_video_frame_rendered = 1;
            ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
        }
    }
}

static Frame *frame_queue_peek_last(FrameQueue *f)
{
    return &f->queue[f->rindex];
}
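
The rindex_shown flag tested in video_refresh exists because the queue keeps the last displayed frame around for redisplay: the first frame_queue_next call after a frame becomes visible only marks it shown, and the slot is recycled one call later. The upstream ffplay implementation, which ijkplayer inherits essentially unchanged:

static void frame_queue_next(FrameQueue *f)
{
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]);
    if (++f->rindex == f->max_size)
        f->rindex = 0;
    SDL_LockMutex(f->mutex);
    f->size--;
    SDL_CondSignal(f->cond);
    SDL_UnlockMutex(f->mutex);
}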

1.

static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
    assert(overlay);
    SDL_VoutOverlay_Opaque *opaque = overlay->opaque;
    AVFrame swscale_dst_pic = { { 0 } };

    av_frame_unref(opaque->linked_frame);

    int need_swap_uv = 0;
    int use_linked_frame = 0;
    enum AVPixelFormat dst_format = AV_PIX_FMT_NONE;
    switch (overlay->format) {
        case SDL_FCC_YV12:
            need_swap_uv = 1;
            // no break;
        case SDL_FCC_I420:
            if (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUVJ420P) {
                // ALOGE("direct draw frame");
                use_linked_frame = 1;
                dst_format = frame->format;
            } else {
                // ALOGE("copy draw frame");
                dst_format = AV_PIX_FMT_YUV420P;
            }
            break;
        case SDL_FCC_I444P10LE:
            if (frame->format == AV_PIX_FMT_YUV444P10LE) {
                // ALOGE("direct draw frame");
                use_linked_frame = 1;
                dst_format = frame->format;
            } else {
                // ALOGE("copy draw frame");
                dst_format = AV_PIX_FMT_YUV444P10LE;
            }
            break;
        case SDL_FCC_RV32:
            dst_format = AV_PIX_FMT_0BGR32;
            break;
        case SDL_FCC_RV24:
            dst_format = AV_PIX_FMT_RGB24;
            break;
        case SDL_FCC_RV16:
            dst_format = AV_PIX_FMT_RGB565;
            break;
        default:
            ALOGE("SDL_VoutFFmpeg_ConvertPicture: unexpected overlay format %s(%d)",
                  (char*)&overlay->format, overlay->format);
            return -1;
    }

    // setup frame
    if (use_linked_frame) {
        // linked frame
        av_frame_ref(opaque->linked_frame, frame);

        overlay_fill(overlay, opaque->linked_frame, opaque->planes);
    ...

static void overlay_fill(SDL_VoutOverlay *overlay, AVFrame *frame, int planes)
{
    overlay->planes = planes;

    for (int i = 0; i < AV_NUM_DATA_POINTERS; ++i) {
        overlay->pixels[i]  = frame->data[i];
        overlay->pitches[i] = frame->linesize[i];
    }
}
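
When use_linked_frame is not set, the elided remainder of func_fill_frame copies instead of linking: the frame is converted into the overlay's own buffer. Roughly (a sketch, not the exact ijkplayer code; img_convert_ctx is assumed to live in the overlay's opaque struct, and swscale_dst_pic to have been pointed at the overlay planes):

    opaque->img_convert_ctx = sws_getCachedContext(opaque->img_convert_ctx,
        frame->width, frame->height, frame->format,
        overlay->w, overlay->h, dst_format,
        SWS_BILINEAR, NULL, NULL, NULL);
    sws_scale(opaque->img_convert_ctx,
              (const uint8_t * const *)frame->data, frame->linesize,
              0, frame->height,
              swscale_dst_pic.data, swscale_dst_pic.linesize);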
...

static GLboolean yuv420p_uploadTexture(IJK_GLES2_Renderer *renderer, SDL_VoutOverlay *overlay)
{
    if (!renderer || !overlay)
        return GL_FALSE;

    int planes[3] = { 0, 1, 2 };
    const GLsizei widths[3]  = { overlay->pitches[0], overlay->pitches[1], overlay->pitches[2] };
    const GLsizei heights[3] = { overlay->h, overlay->h / 2, overlay->h / 2 };
    const GLubyte *pixels[3] = { overlay->pixels[0], overlay->pixels[1], overlay->pixels[2] };

    switch (overlay->format) {
        case SDL_FCC_I420:
            break;
        case SDL_FCC_YV12:
            planes[1] = 2;
            planes[2] = 1;
            break;
        default:
            ALOGE("[yuv420p] unexpected format %x\n", overlay->format);
            return GL_FALSE;
    }

    for (int i = 0; i < 3; ++i) {
        int plane = planes[i];

        glBindTexture(GL_TEXTURE_2D, renderer->plane_textures[i]);

        glTexImage2D(GL_TEXTURE_2D,
                     0,
                     GL_LUMINANCE,
                     widths[plane],
                     heights[plane],
                     0,
                     GL_LUMINANCE,
                     GL_UNSIGNED_BYTE,
                     pixels[plane]);
    }

    return GL_TRUE;
}
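
Note that widths[] holds the pitches (line strides), not the visible width, so each GL_LUMINANCE texture also carries the alignment padding on its right edge. The renderer has to crop that out when sampling; conceptually (a hypothetical illustration, not ijkplayer's actual vertex setup):

    /* hypothetical: limit the horizontal texture coordinate to the visible
       part of the padded texture */
    GLfloat max_u = (GLfloat)overlay->w / (GLfloat)overlay->pitches[0];
    /* use max_u in place of 1.0f when filling the texture-coordinate array */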

http://blog.csdn.net/liujiakunit/article/details/46899229

2.

/* open a given stream. Return 0 if OK */
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
    VideoState *is = ffp->is;
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec = NULL;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = ffp->lowres;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;

    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0)
        goto fail;
    av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);

    codec = avcodec_find_decoder(avctx->codec_id);
    ...
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
    ...

static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;
    d->queue = queue;
    d->empty_queue_cond = empty_queue_cond;
    d->start_pts = AV_NOPTS_VALUE;

    d->first_frame_decoded_time = SDL_GetTickHR();
    d->first_frame_decoded = 0;

    SDL_ProfilerReset(&d->decode_profiler, -1);
}
static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
{
    VideoState *is = ffp->is;
    int got_picture;

    ffp_video_statistic_l(ffp);

    if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
        return -1;
    ...
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: {
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                    if (ffp->decoder_reorder_pts == -1) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!ffp->decoder_reorder_pts) {
                        frame->pts = frame->pkt_dts;
                    }
                }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
            default:
                break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}
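
The flush_pkt and serial checks above are the seek machinery: a seek pushes flush_pkt into every packet queue, which bumps the queue's serial, so packets queued before the seek no longer match d->pkt_serial and are skipped. The relevant fragment from upstream ffplay's packet_queue_put_private, for reference:

    if (pkt == &flush_pkt)
        q->serial++;
    pkt1->serial = q->serial;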

3.

/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    FFPlayer *ffp = opaque;
    VideoState *is = ffp->is;
    int audio_size, len1;

    if (!ffp || !is) {
        memset(stream, 0, len);
        return;
    }

    ffp->audio_callback_time = av_gettime_relative();

    if (ffp->pf_playback_rate_changed) {
        ffp->pf_playback_rate_changed = 0;
#if defined(__ANDROID__)
        if (!ffp->soundtouch_enable) {
            SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
        }
#else
        SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
#endif
    }
    if (ffp->pf_playback_volume_changed) {
        ffp->pf_playback_volume_changed = 0;
        SDL_AoutSetPlaybackVolume(ffp->aout, ffp->pf_playback_volume);
    }

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(ffp);
            ...
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = NULL;
                is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
            } else {
                if (is->show_mode != SHOW_MODE_VIDEO)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        if (is->auddec.pkt_serial != is->audioq.serial) {
            is->audio_buf_index = is->audio_buf_size;
            memset(stream, 0, len);
            // stream += len;
            // len = 0;
            SDL_AoutFlushAudio(ffp->aout);
            break;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        else {
            memset(stream, 0, len1);
            if (!is->muted && is->audio_buf)
                SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
        }
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    if (!isnan(is->audio_clock)) {
        set_clock_at(&is->audclk, is->audio_clock - (double)(is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec - SDL_AoutGetLatencySeconds(ffp->aout), is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
}
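
set_clock_at backdates the audio clock by whatever is still buffered on the player side plus the output latency. Illustrative numbers only: at 44100 Hz stereo S16, bytes_per_sec is 176400, so with audio_write_buf_size = 8192 the clock lands about 46 ms behind is->audio_clock before the device latency is subtracted as well:

    /* illustrative: 44100 Hz * 2 ch * 2 bytes = 176400 bytes/s */
    double pts = is->audio_clock
               - 8192.0 / 176400.0                      /* ~46 ms still unplayed */
               - SDL_AoutGetLatencySeconds(ffp->aout);  /* device/driver queue */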
static int audio_decode_frame(FFPlayer *ffp)
{
    VideoState *is = ffp->is;
    int data_size, resampled_data_size;
    int64_t dec_channel_layout;
    av_unused double audio_clock0;
    int wanted_nb_samples;
    Frame *af;
    int translate_time = 1;

    if (is->paused || is->step)
        return -1;

    if (ffp->sync_av_start &&                       /* sync enabled */
        is->video_st &&                             /* has video stream */
        !is->viddec.first_frame_decoded &&          /* not hot */
        is->viddec.finished != is->videoq.serial) { /* not finished */
        /* waiting for first video frame */
        Uint64 now = SDL_GetTickHR();
        if (now < is->viddec.first_frame_decoded_time ||
            now > is->viddec.first_frame_decoded_time + 2000) {
            is->viddec.first_frame_decoded = 1;
        } else {
            /* video pipeline is not ready yet */
            return -1;
        }
    }

reload:
    do {
#if defined(_WIN32) || defined(__APPLE__)
        while (frame_queue_nb_remaining(&is->sampq) == 0) {
            if ((av_gettime_relative() - ffp->audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
                return -1;
            av_usleep(1000);
        }
#endif
        if (!(af = frame_queue_peek_readable(&is->sampq)))
            return -1;
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial);
    ...
    if (is->swr_ctx) {
        const uint8_t **in = (const uint8_t **)af->frame->extended_data;
        uint8_t **out = &is->audio_buf1;
        int out_count = (int)((int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256);
        int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
        int len2;
        if (out_size < 0) {
            av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            return -1;
        }
        if (wanted_nb_samples != af->frame->nb_samples) {
            if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
                                     wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
                av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
                return -1;
            }
        }
        av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
        if (!is->audio_buf1)
            return AVERROR(ENOMEM);
        len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
        if (len2 < 0) {
            av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
            return -1;
        }
        if (len2 == out_count) {
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if (swr_init(is->swr_ctx) < 0)
                swr_free(&is->swr_ctx);
        }
        is->audio_buf = is->audio_buf1;
        int bytes_per_sample = av_get_bytes_per_sample(is->audio_tgt.fmt);
        resampled_data_size = len2 * is->audio_tgt.channels * bytes_per_sample;
#if defined(__ANDROID__)
        if (ffp->soundtouch_enable && ffp->pf_playback_rate != 1.0f && !is->abort_request) {
            av_fast_malloc(&is->audio_new_buf, &is->audio_new_buf_size, out_size * translate_time);
            for (int i = 0; i < (resampled_data_size / 2); i++)
            {
                is->audio_new_buf[i] = (is->audio_buf1[i * 2] | (is->audio_buf1[i * 2 + 1] << 8));
            }

            int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
                                                   resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
            if (ret_len > 0) {
                is->audio_buf = (uint8_t*)is->audio_new_buf;
                resampled_data_size = ret_len;
            } else {
                translate_time++;
                goto reload;
            }
        }
#endif
    } else {
        is->audio_buf = af->frame->data[0];
        resampled_data_size = data_size;
    }
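
is->swr_ctx itself is created in the elided middle of audio_decode_frame whenever the decoded format, channel layout, or sample rate differs from audio_tgt. Condensed from the upstream ffplay logic (the real error log also prints both formats):

        is->swr_ctx = swr_alloc_set_opts(NULL,
                                         is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
                                         dec_channel_layout, af->frame->format, af->frame->sample_rate,
                                         0, NULL);
        if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create sample rate converter\n");
            swr_free(&is->swr_ctx);
            return -1;
        }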
