第11月第8天 ffmpeg ffplay
- static int ffplay_video_thread(void *arg)
- {
- FFPlayer *ffp = arg;
- VideoState *is = ffp->is;
- AVFrame *frame = av_frame_alloc();
- ...
- for (;;) {
- ret = get_video_frame(ffp, frame);
- ...
- ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
- av_frame_unref(frame);
- #if CONFIG_AVFILTER
- }
- ...
- /* alloc or resize hardware picture buffer */
- if (!vp->bmp || !vp->allocated ||
- vp->width != src_frame->width ||
- vp->height != src_frame->height ||
- vp->format != src_frame->format) {
- if (vp->width != src_frame->width || vp->height != src_frame->height)
- ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, src_frame->width, src_frame->height);
- vp->allocated = 0;
- vp->width = src_frame->width;
- vp->height = src_frame->height;
- vp->format = src_frame->format;
- /* the allocation must be done in the main thread to avoid
- locking problems. */
- alloc_picture(ffp, src_frame->format);
- if (is->videoq.abort_request)
- return -1;
- }
- /* if the frame is not skipped, then display it */
- if (vp->bmp) {
- /* get a pointer on the bitmap */
- SDL_VoutLockYUVOverlay(vp->bmp);
- #ifdef FFP_MERGE
- #if CONFIG_AVFILTER
- // FIXME use direct rendering
- av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
- src_frame->format, vp->width, vp->height);
- #else
- // sws_getCachedContext(...);
- #endif
- #endif
- // FIXME: set swscale options
- if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
- av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
- exit(1);
- }
- ...
- static void alloc_picture(FFPlayer *ffp, int frame_format)
- {
- VideoState *is = ffp->is;
- Frame *vp;
- #ifdef FFP_MERGE
- int sdl_format;
- #endif
- vp = &is->pictq.queue[is->pictq.windex];
- free_picture(vp);
- #ifdef FFP_MERGE
- video_open(is, vp);
- #endif
- SDL_VoutSetOverlayFormat(ffp->vout, ffp->overlay_format);
- vp->bmp = SDL_Vout_CreateOverlay(vp->width, vp->height,
- frame_format,
- ffp->vout);
- ...
- int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
- {
- if (!overlay || !overlay->func_fill_frame)
- return -;
- return overlay->func_fill_frame(overlay, frame);
- }
- static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
- {
- VideoState *is = ffp->is;
- int got_picture;
- ffp_video_statistic_l(ffp);
- if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
- return -1;
- ...
rindex
- /* called to display each frame */
- static void video_refresh(FFPlayer *opaque, double *remaining_time)
- {
- ..
- display:
- /* display picture */
- if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
- video_display2(ffp);
- ...
- /* display the current picture, if any */
- static void video_display2(FFPlayer *ffp)
- {
- VideoState *is = ffp->is;
- if (is->video_st)
- video_image_display2(ffp);
- }
- static void video_image_display2(FFPlayer *ffp)
- {
- VideoState *is = ffp->is;
- Frame *vp;
- Frame *sp = NULL;
- vp = frame_queue_peek_last(&is->pictq);
- int latest_seek_load_serial = __atomic_exchange_n(&(is->latest_seek_load_serial), -, memory_order_seq_cst);
- if (latest_seek_load_serial == vp->serial)
- ffp->stat.latest_seek_load_duration = (av_gettime() - is->latest_seek_load_start_at) / ;
- if (vp->bmp) {
- if (is->subtitle_st) {
- if (frame_queue_nb_remaining(&is->subpq) > ) {
- sp = frame_queue_peek(&is->subpq);
- if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / )) {
- if (!sp->uploaded) {
- if (sp->sub.num_rects > ) {
- char buffered_text[];
- if (sp->sub.rects[]->text) {
- strncpy(buffered_text, sp->sub.rects[]->text, );
- }
- else if (sp->sub.rects[]->ass) {
- parse_ass_subtitle(sp->sub.rects[]->ass, buffered_text);
- }
- ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, , , buffered_text, sizeof(buffered_text));
- }
- sp->uploaded = ;
- }
- }
- }
- }
- SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
- ffp->stat.vfps = SDL_SpeedSamplerAdd(&ffp->vfps_sampler, FFP_SHOW_VFPS_FFPLAY, "vfps[ffplay]");
- if (!ffp->first_video_frame_rendered) {
- ffp->first_video_frame_rendered = ;
- ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
- }
- }
- }
- static Frame *frame_queue_peek_last(FrameQueue *f)
- {
- return &f->queue[f->rindex];
- }
1.
- static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame)
- {
- assert(overlay);
- SDL_VoutOverlay_Opaque *opaque = overlay->opaque;
- AVFrame swscale_dst_pic = { { 0 } };
- av_frame_unref(opaque->linked_frame);
- int need_swap_uv = 0;
- int use_linked_frame = 0;
- enum AVPixelFormat dst_format = AV_PIX_FMT_NONE;
- switch (overlay->format) {
- case SDL_FCC_YV12:
- need_swap_uv = 1;
- // no break;
- case SDL_FCC_I420:
- if (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUVJ420P) {
- // ALOGE("direct draw frame");
- use_linked_frame = 1;
- dst_format = frame->format;
- } else {
- // ALOGE("copy draw frame");
- dst_format = AV_PIX_FMT_YUV420P;
- }
- break;
- case SDL_FCC_I444P10LE:
- if (frame->format == AV_PIX_FMT_YUV444P10LE) {
- // ALOGE("direct draw frame");
- use_linked_frame = 1;
- dst_format = frame->format;
- } else {
- // ALOGE("copy draw frame");
- dst_format = AV_PIX_FMT_YUV444P10LE;
- }
- break;
- case SDL_FCC_RV32:
- dst_format = AV_PIX_FMT_0BGR32;
- break;
- case SDL_FCC_RV24:
- dst_format = AV_PIX_FMT_RGB24;
- break;
- case SDL_FCC_RV16:
- dst_format = AV_PIX_FMT_RGB565;
- break;
- default:
- ALOGE("SDL_VoutFFmpeg_ConvertPicture: unexpected overlay format %s(%d)",
- (char*)&overlay->format, overlay->format);
- return -1;
- }
- // setup frame
- if (use_linked_frame) {
- // linked frame
- av_frame_ref(opaque->linked_frame, frame);
- overlay_fill(overlay, opaque->linked_frame, opaque->planes);
- ...
- static void overlay_fill(SDL_VoutOverlay *overlay, AVFrame *frame, int planes)
- {
- overlay->planes = planes;
- for (int i = ; i < AV_NUM_DATA_POINTERS; ++i) {
- overlay->pixels[i] = frame->data[i];
- overlay->pitches[i] = frame->linesize[i];
- }
- }
- ...
- static GLboolean yuv420p_uploadTexture(IJK_GLES2_Renderer *renderer, SDL_VoutOverlay *overlay)
- {
- if (!renderer || !overlay)
- return GL_FALSE;
- int planes[] = { , , };
- const GLsizei widths[] = { overlay->pitches[], overlay->pitches[], overlay->pitches[] };
- const GLsizei heights[] = { overlay->h, overlay->h / , overlay->h / };
- const GLubyte *pixels[] = { overlay->pixels[], overlay->pixels[], overlay->pixels[] };
- switch (overlay->format) {
- case SDL_FCC_I420:
- break;
- case SDL_FCC_YV12:
- planes[] = ;
- planes[] = ;
- break;
- default:
- ALOGE("[yuv420p] unexpected format %x\n", overlay->format);
- return GL_FALSE;
- }
- for (int i = ; i < ; ++i) {
- int plane = planes[i];
- glBindTexture(GL_TEXTURE_2D, renderer->plane_textures[i]);
- glTexImage2D(GL_TEXTURE_2D,
- ,
- GL_LUMINANCE,
- widths[plane],
- heights[plane],
- ,
- GL_LUMINANCE,
- GL_UNSIGNED_BYTE,
- pixels[plane]);
- }
- return GL_TRUE;
- }
http://blog.csdn.net/liujiakunit/article/details/46899229
2.
- /* open a given stream. Return 0 if OK */
- static int stream_component_open(FFPlayer *ffp, int stream_index)
- {
- VideoState *is = ffp->is;
- AVFormatContext *ic = is->ic;
- AVCodecContext *avctx;
- AVCodec *codec = NULL;
- const char *forced_codec_name = NULL;
- AVDictionary *opts = NULL;
- AVDictionaryEntry *t = NULL;
- int sample_rate, nb_channels;
- int64_t channel_layout;
- int ret = 0;
- int stream_lowres = ffp->lowres;
- if (stream_index < 0 || stream_index >= ic->nb_streams)
- return -1;
- avctx = avcodec_alloc_context3(NULL);
- if (!avctx)
- return AVERROR(ENOMEM);
- ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
- if (ret < 0)
- goto fail;
- av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
- codec = avcodec_find_decoder(avctx->codec_id);
- ...
- case AVMEDIA_TYPE_VIDEO:
- is->video_stream = stream_index;
- is->video_st = ic->streams[stream_index];
- decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
- ...
- static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
- memset(d, , sizeof(Decoder));
- d->avctx = avctx;
- d->queue = queue;
- d->empty_queue_cond = empty_queue_cond;
- d->start_pts = AV_NOPTS_VALUE;
- d->first_frame_decoded_time = SDL_GetTickHR();
- d->first_frame_decoded = ;
- SDL_ProfilerReset(&d->decode_profiler, -);
- }
- static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
- {
- VideoState *is = ffp->is;
- int got_picture;
- ffp_video_statistic_l(ffp);
- if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
- return -1;
- ...
- static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
- int got_frame = ;
- do {
- int ret = -;
- if (d->queue->abort_request)
- return -;
- if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
- AVPacket pkt;
- do {
- if (d->queue->nb_packets == )
- SDL_CondSignal(d->empty_queue_cond);
- if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < )
- return -;
- if (pkt.data == flush_pkt.data) {
- avcodec_flush_buffers(d->avctx);
- d->finished = ;
- d->next_pts = d->start_pts;
- d->next_pts_tb = d->start_pts_tb;
- }
- } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
- av_packet_unref(&d->pkt);
- d->pkt_temp = d->pkt = pkt;
- d->packet_pending = ;
- }
- switch (d->avctx->codec_type) {
- case AVMEDIA_TYPE_VIDEO: {
- ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
- if (got_frame) {
- ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
- if (ffp->decoder_reorder_pts == -) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
- } else if (!ffp->decoder_reorder_pts) {
- frame->pts = frame->pkt_dts;
- }
- }
- }
- break;
- case AVMEDIA_TYPE_AUDIO:
- ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
- if (got_frame) {
- AVRational tb = (AVRational){, frame->sample_rate};
- if (frame->pts != AV_NOPTS_VALUE)
- frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
- else if (d->next_pts != AV_NOPTS_VALUE)
- frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
- if (frame->pts != AV_NOPTS_VALUE) {
- d->next_pts = frame->pts + frame->nb_samples;
- d->next_pts_tb = tb;
- }
- }
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
- break;
- default:
- break;
- }
- if (ret < ) {
- d->packet_pending = ;
- } else {
- d->pkt_temp.dts =
- d->pkt_temp.pts = AV_NOPTS_VALUE;
- if (d->pkt_temp.data) {
- if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
- ret = d->pkt_temp.size;
- d->pkt_temp.data += ret;
- d->pkt_temp.size -= ret;
- if (d->pkt_temp.size <= )
- d->packet_pending = ;
- } else {
- if (!got_frame) {
- d->packet_pending = ;
- d->finished = d->pkt_serial;
- }
- }
- }
- } while (!got_frame && !d->finished);
- return got_frame;
- }
3.
- /* prepare a new audio buffer */
- static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
- {
- FFPlayer *ffp = opaque;
- VideoState *is = ffp->is;
- int audio_size, len1;
- if (!ffp || !is) {
- memset(stream, 0, len);
- return;
- }
- ffp->audio_callback_time = av_gettime_relative();
- if (ffp->pf_playback_rate_changed) {
- ffp->pf_playback_rate_changed = 0;
- #if defined(__ANDROID__)
- if (!ffp->soundtouch_enable) {
- SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
- }
- #else
- SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
- #endif
- }
- if (ffp->pf_playback_volume_changed) {
- ffp->pf_playback_volume_changed = 0;
- SDL_AoutSetPlaybackVolume(ffp->aout, ffp->pf_playback_volume);
- }
- while (len > 0) {
- if (is->audio_buf_index >= is->audio_buf_size) {
- audio_size = audio_decode_frame(ffp);
- ...
- if (audio_size < 0) {
- /* if error, just output silence */
- is->audio_buf = NULL;
- is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
- } else {
- if (is->show_mode != SHOW_MODE_VIDEO)
- update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
- is->audio_buf_size = audio_size;
- }
- is->audio_buf_index = 0;
- }
- if (is->auddec.pkt_serial != is->audioq.serial) {
- is->audio_buf_index = is->audio_buf_size;
- memset(stream, 0, len);
- // stream += len;
- // len = 0;
- SDL_AoutFlushAudio(ffp->aout);
- break;
- }
- len1 = is->audio_buf_size - is->audio_buf_index;
- if (len1 > len)
- len1 = len;
- if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
- memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
- else {
- memset(stream, 0, len1);
- if (!is->muted && is->audio_buf)
- SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
- }
- len -= len1;
- stream += len1;
- is->audio_buf_index += len1;
- }
- is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
- /* Let's assume the audio driver that is used by SDL has two periods. */
- if (!isnan(is->audio_clock)) {
- set_clock_at(&is->audclk, is->audio_clock - (double)(is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec - SDL_AoutGetLatencySeconds(ffp->aout), is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
- sync_clock_to_slave(&is->extclk, &is->audclk);
- }
- }
- static int audio_decode_frame(FFPlayer *ffp)
- {
- VideoState *is = ffp->is;
- int data_size, resampled_data_size;
- int64_t dec_channel_layout;
- av_unused double audio_clock0;
- int wanted_nb_samples;
- Frame *af;
- int translate_time = 1;
- if (is->paused || is->step)
- return -1;
- if (ffp->sync_av_start && /* sync enabled */
- is->video_st && /* has video stream */
- !is->viddec.first_frame_decoded && /* not hot */
- is->viddec.finished != is->videoq.serial) { /* not finished */
- /* waiting for first video frame */
- Uint64 now = SDL_GetTickHR();
- if (now < is->viddec.first_frame_decoded_time ||
- now > is->viddec.first_frame_decoded_time + 2000) {
- is->viddec.first_frame_decoded = 1;
- } else {
- /* video pipeline is not ready yet */
- return -1;
- }
- }
- reload:
- do {
- #if defined(_WIN32) || defined(__APPLE__)
- while (frame_queue_nb_remaining(&is->sampq) == 0) {
- if ((av_gettime_relative() - ffp->audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
- return -1;
- av_usleep (1000);
- }
- #endif
- if (!(af = frame_queue_peek_readable(&is->sampq)))
- return -1;
- frame_queue_next(&is->sampq);
- } while (af->serial != is->audioq.serial);
- ...
- if (is->swr_ctx) {
- const uint8_t **in = (const uint8_t **)af->frame->extended_data;
- uint8_t **out = &is->audio_buf1;
- int out_count = (int)((int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256);
- int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
- int len2;
- if (out_size < 0) {
- av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
- return -1;
- }
- if (wanted_nb_samples != af->frame->nb_samples) {
- if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
- wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
- av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
- return -1;
- }
- }
- av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
- if (!is->audio_buf1)
- return AVERROR(ENOMEM);
- len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
- if (len2 < 0) {
- av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
- return -1;
- }
- if (len2 == out_count) {
- av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
- if (swr_init(is->swr_ctx) < 0)
- swr_free(&is->swr_ctx);
- }
- is->audio_buf = is->audio_buf1;
- int bytes_per_sample = av_get_bytes_per_sample(is->audio_tgt.fmt);
- resampled_data_size = len2 * is->audio_tgt.channels * bytes_per_sample;
- #if defined(__ANDROID__)
- if (ffp->soundtouch_enable && ffp->pf_playback_rate != 1.0f && !is->abort_request) {
- av_fast_malloc(&is->audio_new_buf, &is->audio_new_buf_size, out_size * translate_time);
- for (int i = 0; i < (resampled_data_size / 2); i++)
- {
- is->audio_new_buf[i] = (is->audio_buf1[i * 2] | (is->audio_buf1[i * 2 + 1] << 8));
- }
- int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
- resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
- if (ret_len > 0) {
- is->audio_buf = (uint8_t*)is->audio_new_buf;
- resampled_data_size = ret_len;
- } else {
- translate_time++;
- goto reload;
- }
- }
- #endif
- } else {
- is->audio_buf = af->frame->data[0];
- resampled_data_size = data_size;
- }
第11月第8天 ffmpeg ffplay的更多相关文章
- 音频相关 ALSA ffmpeg ffplay 命令用法 g7xx
采样率: samples 441100 每秒 DAC/ADC 采样的频率,声卡一般还支持 48k 8k 等模式. 通道:channels 2声道 左右声道 也有单声道的声音,5.1 声道 位数: 16 ...
- psp进度(11月25号-31号)
本周psp进度 11月25号 内容 开始时间 结束时间 打断时间 净时间 处理数据集 9:27 11:34 12m 115m 11月27号 内容 开始时间 结束时间 打断时间 净时间 scr ...
- 本周psp(11月17-23)
本周psp进度 11月19号 内容 开始时间 结束时间 打断时间 净时间 发布读书笔记 11:05 11:25 0 20m 看构建之法书 9:50 10:48 5m 53m 11月20号 内容 开始时 ...
- Autodesk 2013开发者日(DevDays)又要来了 -- 北京(2013年11月7日)和上海(2013年11月11日)
各位, 一年一度的Autodesk 开发者日(Devdays)开始注册了,抓紧时间前排占座! 注册地址: www.autodesk.com.cn/adndevday2013 今年开发者日的主题:革命性 ...
- GTAC 2015将于11月10号和11号召开
今年的GTAC注册已经结束,将会在11月10号和11号在Google马萨诸塞州剑桥办公室召开.大家可以关注https://developers.google.com/google-test-autom ...
- 11月30日《奥威Power-BI智能分析报表制作方法》腾讯课堂开课啦
这么快一周就过去了,奥威公开课又要与大家见面咯,上节课老师教的三种报表集成方法你们都掌握了吗?大家都知道,学习的结果在于实际应用,想要熟练掌握新内容的要点就在于去应用它.正是基于这一要点,每一期的课程 ...
- 补psp进度(11月4号-9号)
这周psp进度 11月4号 内容 开始时间 结束时间 打断时间 净时间 小伙伴聊天实现 9:45 10:49 0 64m 学习HttpURLConnection 14:13 15:48 10m 85m ...
- MySQL_杭州11月销售昨日未上架的SKU_20161212
#C034杭州11月销售昨日未上架的SKU SELECT 城市,a.订单日期,a.客户数,a.订单数,b.产品数,a.金额,c.销售确认额,c.毛利额,c.毛利率 FROM ( SELECT 城市,订 ...
- 11月23日《奥威Power-BI报表集成到其他系统》腾讯课堂开课啦
听说明天全国各地区都要冷到爆了,要是天气冷到可以放假就好了.想象一下大冷天的一定要在被窝里度过才对嘛,索性明天晚上来个相约吧,相约在被窝里看奥威Power-BI公开课如何? 上周奥威公开 ...
随机推荐
- 猎豹CEO傅盛:95%的人碌碌无为,只是不懂这一点!
我一直在思索,怎么才能让一家公司更快地成长?一个人怎么才能从一群人的竞争当中脱颖而出? 1. 人的四种认知状态 最近我看了一幅图,我在其上加了一个数字注脚. 这是一个人认知的四种状态——“不知道自 ...
- Alpha 冲刺二
团队成员 051601135 岳冠宇 051604103 陈思孝 031602629 刘意晗 031602248 郑智文 031602234 王淇 会议照片 项目燃尽图 项目进展 暂无进展, 项目描述 ...
- 获取移动端 touchend 事件中真正触摸点下方的元素
移动端的touchstart, touchmove, touchend三个事件,拖动元素结束时,获取到了touchend事件, 但是event.touches[0].target所指向的元素却是tou ...
- 安装spring-tool-suite插件
spring-tool-suite是一个非常好用的spring插件,由于eclipse是一个很简洁的IDE,因此许多插件,需要我们自己去手动下载.而Spring-tool-suite插件即是其中之一. ...
- 安装 oracle
先下载3个东西:链接忘记了,大家自己找一下 1 ORA+11+G+R2+server+64bit+for+windows.iso (oracle 安装文件) 2 PLSql 3 oracle6 ...
- Spring中ClassPathXmlApplication与FileSystemXmlApplicationContext的区别以及ClassPathXmlApplicationContext 的具体路径
一.ClassPathXmlApplicationContext 的具体路径 String s[] = System.getProperty("java.class.path"). ...
- MT【203】连续型的最值
(北大自招)已知$-6\le x_i\le 10 (i=1,2,\cdots,10),\sum\limits_{i=1}^{10}x_i=50,$当$\sum\limits_{i=1}^{10}x^2 ...
- 洛谷P4180 [BJWC2010]次小生成树(最小生成树,LCT,主席树,倍增LCA,倍增,树链剖分)
洛谷题目传送门 %%%TPLY巨佬和ysner巨佬%%% 他们的题解 思路分析 具体思路都在各位巨佬的题解中.这题做法挺多的,我就不对每个都详细讲了,泛泛而谈吧. 大多数算法都要用kruskal把最小 ...
- 网络传输---HttpURLConnection
HttpURLConnection是java做网络传输的一种,一般用于做数据的传输如xml数据传输 1.创建及配置: 1.1创建一个url对象,并指定url的地址 URL url = new URL( ...
- 【codevs1245】最小的 N 个和
题目大意:给定两个有 N 个数字的序列,从这两个序列中任取一个数相加,共有 \(N^2\) 个和,求这些和中最小的 N 个. 题解:由于数据量是 10W,必须减少每次选取的决策集合中元素的个数.可以发 ...