WebRTC VideoEngine超详细教程(三)——集成X264编码和ffmpeg解码
转自:http://blog.csdn.net/nonmarking/article/details/47958395
本系列目前共三篇文章,后续还会更新
WebRTC VideoEngine超详细教程(一)——视频通话的基本流程
WebRTC VideoEngine超详细教程(二)——集成OPENH264编解码器
WebRTC VideoEngine超详细教程(三)——集成X264编码和ffmpeg解码
总述
在前一篇文章中,讲解了如何将OPENH264编解码器集成到WebRTC中,但是OPENH264只能编码baseline的H264视频,而且就编码质量而言,还是X264最好,本文就来讲解一下如何将X264编码器集成到WebRTC中,为了实现解码,同时要用到ffmpeg。总体流程和之前一样,分为重新封装编解码器和注册调用两大步骤,注册调用这一步没有任何不同,主要是重新封装这一步骤有较大区别。
重新封装X264编码功能
- #include <stdint.h>
- #include <stdio.h>
- #include <x264.h>
- int main( int argc, char **argv )
- {
- int width, height;
- x264_param_t param;
- x264_picture_t pic;
- x264_picture_t pic_out;
- x264_t *h;
- int i_frame = 0;
- int i_frame_size;
- x264_nal_t *nal;
- int i_nal;
- /* Get default params for preset/tuning */
- if( x264_param_default_preset( ¶m, "medium", NULL ) < 0 )
- goto fail;
- /* Configure non-default params */
- param.i_csp = X264_CSP_I420;
- param.i_width = width;
- param.i_height = height;
- param.b_vfr_input = 0;
- param.b_repeat_headers = 1;
- param.b_annexb = 1;
- /* Apply profile restrictions. */
- if( x264_param_apply_profile( ¶m, "high" ) < 0 )
- goto fail;
- if( x264_picture_alloc( &pic, param.i_csp, param.i_width, param.i_height ) < 0 )
- goto fail;
- h = x264_encoder_open( ¶m);
- if( !h )
- goto fail;
- int luma_size = width * height;
- int chroma_size = luma_size / 4;
- /* Encode frames */
- for( ;; i_frame++ )
- {
- /* Read input frame */
- if( fread( pic.img.plane[0], 1, luma_size, stdin ) != luma_size )
- break;
- if( fread( pic.img.plane[1], 1, chroma_size, stdin ) != chroma_size )
- break;
- if( fread( pic.img.plane[2], 1, chroma_size, stdin ) != chroma_size )
- break;
- pic.i_pts = i_frame;
- i_frame_size = x264_encoder_encode( h, &nal, &i_nal, &pic, &pic_out );
- if( i_frame_size < 0 )
- goto fail;
- else if( i_frame_size )
- {
- if( !fwrite( nal->p_payload, i_frame_size, 1, stdout ) )
- goto fail;
- }
- }
- /* Flush delayed frames */
- while( x264_encoder_delayed_frames( h ) )
- {
- i_frame_size = x264_encoder_encode( h, &nal, &i_nal, NULL, &pic_out );
- if( i_frame_size < 0 )
- goto fail;
- else if( i_frame_size )
- {
- if( !fwrite( nal->p_payload, i_frame_size, 1, stdout ) )
- goto fail;
- }
- }
- x264_encoder_close( h );
- x264_picture_clean( &pic );
- return 0;
- }
还是一样,照葫芦画瓢,改写上一篇文章中提到的H264EncoderImpl类
- x264_picture_t pic;      // input picture handed to x264_encoder_encode (planes point into the WebRTC frame)
- x264_picture_t pic_out;  // output/reconstruction picture metadata filled by x264
- x264_t *encoder_;        // x264 encoder handle, owned by this class
- int i_frame = 0;//frame index, used as the monotonically increasing PTS
- x264_nal_t *nal;         // NAL array produced by the most recent encode call (owned by x264)
相应的,构造函数和析构函数也要改变,这里就不赘述了,重点看InitEncode方法和Encode方法。
- int H264EncoderImpl::InitEncode(const VideoCodec* inst,
- int number_of_cores,
- size_t max_payload_size) {
- if (inst == NULL) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- if (inst->maxFramerate < 1) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- // allow zero to represent an unspecified maxBitRate
- if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- if (inst->width < 1 || inst->height < 1) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- if (number_of_cores < 1) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- int ret_val = Release();
- if (ret_val < 0) {
- return ret_val;
- }
- /* Get default params for preset/tuning */
- x264_param_t param;
- ret_val = x264_param_default_preset(¶m, "medium", NULL);
- if (ret_val != 0) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::InitEncode() fails to initialize encoder ret_val %d",
- ret_val);
- x264_encoder_close(encoder_);
- encoder_ = NULL;
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- /* Configure non-default params */
- param.i_csp = X264_CSP_I420;
- param.i_width = inst->width;
- param.i_height = inst->height;
- param.b_vfr_input = 0;
- param.b_repeat_headers = 1;
- param.b_annexb = 0;//这里设置为0,是为了使编码后的NAL统一有4字节的起始码,便于处理,否则会同时有3字节和4字节的起始码,很麻烦
- param.i_fps_num = 1;
- param.i_fps_num = codec_.maxFramerate;
- param.rc.i_bitrate = codec_.maxBitrate;
- /* Apply profile restrictions. */
- ret_val = x264_param_apply_profile(¶m, "high");
- if (ret_val != 0) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::InitEncode() fails to initialize encoder ret_val %d",
- ret_val);
- x264_encoder_close(encoder_);
- encoder_ = NULL;
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- ret_val = x264_picture_alloc(&pic, param.i_csp, param.i_width, param.i_height);
- if (ret_val != 0) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::InitEncode() fails to initialize encoder ret_val %d",
- ret_val);
- x264_encoder_close(encoder_);
- encoder_ = NULL;
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- encoder_ = x264_encoder_open(¶m);
- if (!encoder_){
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::InitEncode() fails to initialize encoder ret_val %d",
- ret_val);
- x264_encoder_close(encoder_);
- x264_picture_clean(&pic);
- encoder_ = NULL;
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- if (&codec_ != inst) {
- codec_ = *inst;
- }
- if (encoded_image_._buffer != NULL) {
- delete[] encoded_image_._buffer;
- }
- encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
- encoded_image_._buffer = new uint8_t[encoded_image_._size];
- encoded_image_._completeFrame = true;
- inited_ = true;
- WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::InitEncode(width:%d, height:%d, framerate:%d, start_bitrate:%d, max_bitrate:%d)",
- inst->width, inst->height, inst->maxFramerate, inst->startBitrate, inst->maxBitrate);
- return WEBRTC_VIDEO_CODEC_OK;
- }
Encode方法的实现改写如下
- int H264EncoderImpl::Encode(const I420VideoFrame& input_image,
- const CodecSpecificInfo* codec_specific_info,
- const std::vector<VideoFrameType>* frame_types) {
- if (!inited_) {
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- }
- if (input_image.IsZeroSize()) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- if (encoded_complete_callback_ == NULL) {
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- }
- VideoFrameType frame_type = kDeltaFrame;
- // We only support one stream at the moment.
- if (frame_types && frame_types->size() > 0) {
- frame_type = (*frame_types)[0];
- }
- bool send_keyframe = (frame_type == kKeyFrame);
- if (send_keyframe) {
- pic.b_keyframe = TRUE;
- WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::EncodeKeyFrame(width:%d, height:%d)",
- input_image.width(), input_image.height());
- }
- // Check for change in frame size.
- if (input_image.width() != codec_.width ||
- input_image.height() != codec_.height) {
- int ret = UpdateCodecFrameSize(input_image);
- if (ret < 0) {
- return ret;
- }
- }
- /* Read input frame */
- pic.img.plane[0] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
- pic.img.plane[1] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
- pic.img.plane[2] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
- pic.i_pts = i_frame;
- int i_nal = 0;
- int i_frame_size = x264_encoder_encode(encoder_, &nal, &i_nal, &pic, &pic_out);
- if (i_frame_size < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::Encode() fails to encode %d",
- i_frame_size);
- x264_encoder_close(encoder_);
- x264_picture_clean(&pic);
- encoder_ = NULL;
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- RTPFragmentationHeader frag_info;
- if (i_frame_size)
- {
- if (i_nal == 0) {
- return WEBRTC_VIDEO_CODEC_OK;
- }
- frag_info.VerifyAndAllocateFragmentationHeader(i_nal);
- encoded_image_._length = 0;
- uint32_t totalNaluIndex = 0;
- for (int nal_index = 0; nal_index < i_nal; nal_index++)
- {
- uint32_t currentNaluSize = 0;
- currentNaluSize = nal[nal_index].i_payload - 4; //x264_encoder_encode编码得到的nal单元是已经带有起始码的,此外,这里直接使用nal[index]即可,不必再使用x264_nal_encode函数
- memcpy(encoded_image_._buffer + encoded_image_._length, nal[nal_index].p_payload + 4, currentNaluSize);//encoded_image_中存有的是去掉起始码的数据
- encoded_image_._length += currentNaluSize;
- WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::Encode() nal_type %d, length:%d",
- nal[nal_index].i_type, encoded_image_._length);
- frag_info.fragmentationOffset[totalNaluIndex] = encoded_image_._length - currentNaluSize;
- frag_info.fragmentationLength[totalNaluIndex] = currentNaluSize;
- frag_info.fragmentationPlType[totalNaluIndex] = nal[nal_index].i_type;
- frag_info.fragmentationTimeDiff[totalNaluIndex] = 0;
- totalNaluIndex++;
- }
- }
- i_frame++;
- if (encoded_image_._length > 0) {
- encoded_image_._timeStamp = input_image.timestamp();
- encoded_image_.capture_time_ms_ = input_image.render_time_ms();
- encoded_image_._encodedHeight = codec_.height;
- encoded_image_._encodedWidth = codec_.width;
- encoded_image_._frameType = frame_type;
- // call back
- encoded_complete_callback_->Encoded(encoded_image_, NULL, &frag_info);
- }
- return WEBRTC_VIDEO_CODEC_OK;
- }
其他方法的实现均没有改变。
重新封装ffmpeg解码功能
- AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
- AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
- avcodec_open2(codecCtx, codec, nil);
- char *videoData;
- int len;
- AVFrame *frame = av_frame_alloc();
- AVPacket packet;
- av_new_packet(&packet, len);
- memcpy(packet.data, videoData, len);
- int ret, got_picture;
- ret = avcodec_decode_video2(codecCtx, frame, &got_picture, &packet);
- if (ret > 0){
- if(got_picture){
- //进行下一步的处理
- }
- }
相应的,对H264DecoderImpl类的定义和各方法的实现要进行改写。
首先是类的定义,去掉了ISVCDecoder* decoder_,加入了以下私有成员变量
- AVCodecContext *pCodecCtx;           // ffmpeg decoder context
- AVCodec *pCodec;                     // the H.264 decoder (AV_CODEC_ID_H264)
- AVFrame *pFrame, *pFrameYUV;         // decoded frame / I420-converted frame
- AVPacket *packet;                    // encoded input packet fed to the decoder
- struct SwsContext *img_convert_ctx;  // sws_scale context for pixel-format conversion
- uint8_t *decode_buffer;// buffers the first received SPS/PPS and IDR frame so the very first decode has everything it needs
- uint8_t *out_buffer;                 // backing storage for pFrameYUV's planes
- int framecnt = 0;                    // number of payloads received so far
- int encoded_length = 0;              // bytes accumulated in decode_buffer
- int H264DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
- if (inst == NULL) {
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- int ret_val = Release();
- if (ret_val < 0) {
- return ret_val;
- }
- if (&codec_ != inst) {
- // Save VideoCodec instance for later; mainly for duplicating the decoder.
- codec_ = *inst;
- }
- pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
- pCodecCtx = avcodec_alloc_context3(pCodec);
- pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
- pCodecCtx->width = codec_.width;
- pCodecCtx->height = codec_.height;
- //pCodecCtx->bit_rate = codec_.targetBitrate*1000;
- pCodecCtx->time_base.num = 1;
- pCodecCtx->time_base.den = codec_.maxFramerate;
- if (pCodec == NULL){
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::InitDecode, Codec not found.");
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::InitDecode, Could not open codec.");
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- inited_ = true;
- // Always start with a complete key frame.
- key_frame_required_ = true;
- WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::InitDecode(width:%d, height:%d, framerate:%d, start_bitrate:%d, max_bitrate:%d)",
- inst->width, inst->height, inst->maxFramerate, inst->startBitrate, inst->maxBitrate);
- return WEBRTC_VIDEO_CODEC_OK;
- }
Decode方法的实现改写如下
- int H264DecoderImpl::Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) {
- if (!inited_) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::Decode, decoder is not initialized");
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- }
- if (decode_complete_callback_ == NULL) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::Decode, decode complete call back is not set");
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
- }
- if (input_image._buffer == NULL) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::Decode, null buffer");
- return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- }
- if (!codec_specific_info) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::Decode, no codec info");
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- if (codec_specific_info->codecType != kVideoCodecH264) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264EncoderImpl::Decode, non h264 codec %d", codec_specific_info->codecType);
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::Decode(frame_type:%d, length:%d",
- input_image._frameType, input_image._length);
- if (framecnt < 2)
- {//存储最开始的SPS PPS 和 IDR帧以便进行初始的解码
- memcpy(decode_buffer + encoded_length, input_image._buffer, input_image._length);
- encoded_length += input_image._length;
- framecnt++;
- }
- else
- {
- pFrame = av_frame_alloc();
- pFrameYUV = av_frame_alloc();
- out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
- avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
- img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
- pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
- if (framecnt == 2)
- {
- packet = (AVPacket *)av_malloc(sizeof(AVPacket));
- av_new_packet(packet, encoded_length);
- memcpy(packet->data, decode_buffer, encoded_length);
- av_free(decode_buffer);
- framecnt++;
- printf("\n\nLoading");
- }
- else
- {
- packet = (AVPacket *)av_malloc(sizeof(AVPacket));
- av_new_packet(packet, input_image._length);
- memcpy(packet->data, input_image._buffer, input_image._length);
- }
- int got_picture = 0;
- int ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
- if (ret < 0){
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
- "H264DecoderImpl::Decode, Decode Error.");
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- if (got_picture){
- sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
- pFrameYUV->data, pFrameYUV->linesize);
- int size_y = pFrameYUV->linesize[0] * pCodecCtx->height;
- int size_u = pFrameYUV->linesize[1] * pCodecCtx->height / 2;
- int size_v = pFrameYUV->linesize[2] * pCodecCtx->height / 2;
- decoded_image_.CreateFrame(size_y, static_cast<uint8_t*>(pFrameYUV->data[0]),
- size_u, static_cast<uint8_t*>(pFrameYUV->data[1]),
- size_v, static_cast<uint8_t*>(pFrameYUV->data[2]),
- pCodecCtx->width,
- pCodecCtx->height,
- pFrameYUV->linesize[0],
- pFrameYUV->linesize[1],
- pFrameYUV->linesize[2]);
- decoded_image_.set_timestamp(input_image._timeStamp);
- decode_complete_callback_->Decoded(decoded_image_);
- return WEBRTC_VIDEO_CODEC_OK;
- }
- else
- printf(".");
- av_free_packet(packet);
- }
- return WEBRTC_VIDEO_CODEC_OK;
- }
其他方法的实现保持不变,至此ffmpeg解码功能的重新封装也完成了。
本项目源代码
WebRTC VideoEngine超详细教程(三)——集成X264编码和ffmpeg解码的更多相关文章
- 数学规划求解器lp_solve超详细教程
前言 最近小编学了运筹学中的单纯形法.于是,很快便按奈不住跳动的心.这不得不让我拿起纸和笔思考着,一个至关重要的问题:如何用单纯形法装一个完备的13? 恰巧,在我坐在图书馆陷入沉思的时候,一位漂亮的小 ...
- Github上传代码菜鸟超详细教程【转】
最近需要将课设代码上传到Github上,之前只是用来fork别人的代码. 这篇文章写得是windows下的使用方法. 第一步:创建Github新账户 第二步:新建仓库 第三部:填写名称,简介(可选), ...
- 安装64位Oracle 10g超详细教程
安装64位Oracle 10g超详细教程 1. 安装准备阶段 1.1 安装Oracle环境 经过上一篇博文的过程,已经完成了对Linux系统的安装,本例使用X-Manager来实现与Linux系统的连 ...
- NumPy 超详细教程(3):ndarray 的内部机理及高级迭代
系列文章地址 NumPy 最详细教程(1):NumPy 数组 NumPy 超详细教程(2):数据类型 NumPy 超详细教程(3):ndarray 的内部机理及高级迭代 ndarray 对象的内部机理 ...
- NumPy 超详细教程(2):数据类型
系列文章地址 NumPy 最详细教程(1):NumPy 数组 NumPy 超详细教程(2):数据类型 NumPy 超详细教程(3):ndarray 的内部机理及高级迭代 文章目录 NumPy 数据类型 ...
- NumPy 超详细教程(1):NumPy 数组
系列文章地址 NumPy 最详细教程(1):NumPy 数组 NumPy 超详细教程(2):数据类型 NumPy 超详细教程(3):ndarray 的内部机理及高级迭代 文章目录 Numpy 数组:n ...
- 【python】10分钟教你用python打造贪吃蛇超详细教程
10分钟教你用python打造贪吃蛇超详细教程 在家闲着没妹子约, 刚好最近又学了一下python,听说pygame挺好玩的.今天就在家研究一下, 弄了个贪吃蛇出来.希望大家喜欢. 先看程序效果: 0 ...
- c++ 网络编程课设入门超详细教程 ---目录
原文作者:aircraft 原文链接:https://www.cnblogs.com/DOMLX/p/9663167.html c++ 网络编程(一)TCP/UDP windows/linux 下入门 ...
- c++ 网络编程(九)LINUX/windows-IOCP模型 多线程超详细教程及多线程实现服务端
原文作者:aircraft 原文链接:https://www.cnblogs.com/DOMLX/p/9661012.html 先讲Linux下(windows下在后面可以直接跳到后面看): 一.线程 ...
随机推荐
- latex+bibtex+jabref(zz)
很好的的latex使用心得: bibtex现学现卖 http://derecks.blog.sohu.com/118984444.html latex+bibtex+jabref http://blo ...
- CSS 仿Excel表格功能
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/ ...
- SQL Server 2005 数据库复制(转载)
对于一个地域分散的大型企业组织来说,构建具有典型的分布式计算机特征的大型企业管理信息系统时,总要解决一个很重要的问题:如何在多个不同数 据库服务器之间保证共享数据的一致性.之所以有这个重要的问题在于企 ...
- 一个Java对象到底占用多大内存?
最近在读<深入理解Java虚拟机>,对Java对象的内存布局有了进一步的认识,于是脑子里自然而然就有一个很普通的问题,就是一个Java对象到底占用多大内存? 在网上搜到了一篇博客讲的非常好 ...
- LoadRunner中响应时间与事物时间详解
1. 响应时间 事务是指用户在客户端做一种或多种业务所需要的操作集,通过事务函数可以标记完成该业务所需要的操作内容:另一方面事务可以用来统计用户操作的响应时间,事务响应时间是通过记录用户请求的开始时间 ...
- PHP生成CSV文件
CSV文件的定义这里就不多做介绍了,难能可贵的是用Excel可以直接打开CSV文件.用PHP输出CSV文件本身很简单,但是大家如果有业务需求,下面的代码可以作为参考. $tableheader = a ...
- ubuntu 修改保存报错E37:No write since last change(add ! to override)的解决方法
报错信息如下: E37: No write since last change (add ! to override) 解决办法是: 在修改完后,将命令 :q 改成 :wq 即可.
- 【云计算】docker build如何支持参数化构建?
docker 1.9.0版本之后,已经支持docker build参数化构建. docker 版本更新记录: github讨论: 参开资料: https://github.com/docker/doc ...
- 【转】基于LDA的Topic Model变形
转载自wentingtu 基于LDA的Topic Model变形最近几年来,随着LDA的产生和发展,涌现出了一批搞Topic Model的牛人.我主要关注了下面这位大牛和他的学生:David M. B ...
- Jump Game | & ||
Jump Game | Given an array of non-negative integers, you are initially positioned at the first index ...