Reference code

TVPlayCount.java

    package com.dajiangtai.hadoop.tvplay;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class TVPlayCount extends Configured implements Tool {

        public static class TVPlayMapper extends Mapper<Text, TVPlayData, Text, TVPlayData> {
            @Override
            protected void map(Text key, TVPlayData value, Context context)
                    throws IOException, InterruptedException {
                context.write(key, value);
            }
        }

        public static class TVPlayReducer extends Reducer<Text, TVPlayData, Text, Text> {
            private Text m_key = new Text();
            private Text m_value = new Text();
            private MultipleOutputs<Text, Text> mos;

            // Open the multiple outputs.
            @Override
            protected void setup(Context context) throws IOException, InterruptedException {
                mos = new MultipleOutputs<Text, Text>(context);
            }

            @Override
            protected void reduce(Text key, Iterable<TVPlayData> values, Context context)
                    throws IOException, InterruptedException {
                int daynumber = 0;
                int collectnumber = 0;
                int commentnumber = 0;
                int againstnumber = 0;
                int supportnumber = 0;

                for (TVPlayData tv : values) {
                    daynumber += tv.getDaynumber();
                    collectnumber += tv.getCollectnumber();
                    commentnumber += tv.getCommentnumber();
                    againstnumber += tv.getAgainstnumber();
                    supportnumber += tv.getSupportnumber();
                }

                String[] records = key.toString().split("\t");

                // Media source codes: 1 Youku, 2 Sohu, 3 Tudou, 4 iQiyi, 5 Xunlei Kankan
                String source = records[1]; // media source
                m_key.set(records[0]);      // TV play name
                m_value.set(daynumber + "\t" + collectnumber + "\t" + commentnumber
                        + "\t" + againstnumber + "\t" + supportnumber);
                if (source.equals("1")) {
                    mos.write("youku", m_key, m_value);
                } else if (source.equals("2")) {
                    mos.write("souhu", m_key, m_value);
                } else if (source.equals("3")) {
                    mos.write("tudou", m_key, m_value);
                } else if (source.equals("4")) {
                    mos.write("aiqiyi", m_key, m_value);
                } else if (source.equals("5")) {
                    mos.write("xunlei", m_key, m_value);
                }
            }

            // Close MultipleOutputs — that is, close the underlying RecordWriters,
            // and there is a whole set of them, because many reduce calls write
            // through mos.
            @Override
            protected void cleanup(Context context) throws IOException, InterruptedException {
                mos.close();
            }
        }

        @Override
        public int run(String[] args) throws Exception {
            Configuration conf = new Configuration(); // configuration object
            Path mypath = new Path(args[1]);          // output path
            FileSystem hdfs = mypath.getFileSystem(conf);
            if (hdfs.isDirectory(mypath)) { // delete the output path if it already exists
                hdfs.delete(mypath, true);
            }

            Job job = new Job(conf, "tvplay"); // construct the job
            job.setJarByClass(TVPlayCount.class); // set the main class

            job.setMapperClass(TVPlayMapper.class); // set the Mapper
            job.setMapOutputKeyClass(Text.class); // map output key type
            job.setMapOutputValueClass(TVPlayData.class); // map output value type
            job.setInputFormatClass(TVPlayInputFormat.class); // custom input format

            job.setReducerClass(TVPlayReducer.class); // set the Reducer
            job.setOutputKeyClass(Text.class); // reduce output key type
            job.setOutputValueClass(Text.class); // reduce output value type
            // Register one named output per media source; MultipleOutputs selects
            // the output file by this name.
            MultipleOutputs.addNamedOutput(job, "youku", TextOutputFormat.class,
                    Text.class, Text.class);
            MultipleOutputs.addNamedOutput(job, "souhu", TextOutputFormat.class,
                    Text.class, Text.class);
            MultipleOutputs.addNamedOutput(job, "tudou", TextOutputFormat.class,
                    Text.class, Text.class);
            MultipleOutputs.addNamedOutput(job, "aiqiyi", TextOutputFormat.class,
                    Text.class, Text.class);
            MultipleOutputs.addNamedOutput(job, "xunlei", TextOutputFormat.class,
                    Text.class, Text.class);

            FileInputFormat.addInputPath(job, new Path(args[0])); // input path
            FileOutputFormat.setOutputPath(job, new Path(args[1])); // output path
            return job.waitForCompletion(true) ? 0 : 1;
        }

        public static void main(String[] args) throws Exception {
            String[] args0 = { "hdfs://master:9000/tvplay/",
                    "hdfs://master:9000/tvplay/out" };
            int ec = ToolRunner.run(new Configuration(), new TVPlayCount(), args0);
            System.exit(ec);
        }
    }
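
To make the reduce step concrete, here is a small hand-run sketch. It is not part of the original code — the class name, sample counters, and the placeholder key "someshow\t1" are invented — but it shows exactly what TVPlayReducer does: sum the five counters across all values for one (tvname, source) key, then route the line to the named output that the source code maps to (source "1" goes to "youku").

    package com.dajiangtai.hadoop.tvplay;

    // Hand-run sketch of the reduce aggregation (illustrative only).
    public class ReduceByHand {
        public static void main(String[] args) {
            TVPlayData a = new TVPlayData();
            a.set(100, 10, 5, 1, 20);
            TVPlayData b = new TVPlayData();
            b.set(200, 20, 10, 2, 40);
            // Field-by-field sums, as in TVPlayReducer's loop:
            int daynumber = a.getDaynumber() + b.getDaynumber();             // 300
            int collectnumber = a.getCollectnumber() + b.getCollectnumber(); // 30
            int commentnumber = a.getCommentnumber() + b.getCommentnumber(); // 15
            int againstnumber = a.getAgainstnumber() + b.getAgainstnumber(); // 3
            int supportnumber = a.getSupportnumber() + b.getSupportnumber(); // 60
            // For key "someshow\t1" the reducer would emit this value line
            // to the "youku" named output, keyed by "someshow".
            System.out.println(daynumber + "\t" + collectnumber + "\t"
                    + commentnumber + "\t" + againstnumber + "\t" + supportnumber);
        }
    }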

TVPlayData.java

    package com.dajiangtai.hadoop.tvplay;

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.hadoop.io.WritableComparable;

    /**
     * @author yangjun
     * @function custom writable value object
     */
    public class TVPlayData implements WritableComparable<Object> {
        private int daynumber;
        private int collectnumber;
        private int commentnumber;
        private int againstnumber;
        private int supportnumber;

        public TVPlayData() {}

        public void set(int daynumber, int collectnumber, int commentnumber,
                int againstnumber, int supportnumber) {
            this.daynumber = daynumber;
            this.collectnumber = collectnumber;
            this.commentnumber = commentnumber;
            this.againstnumber = againstnumber;
            this.supportnumber = supportnumber;
        }

        public int getDaynumber() {
            return daynumber;
        }

        public void setDaynumber(int daynumber) {
            this.daynumber = daynumber;
        }

        public int getCollectnumber() {
            return collectnumber;
        }

        public void setCollectnumber(int collectnumber) {
            this.collectnumber = collectnumber;
        }

        public int getCommentnumber() {
            return commentnumber;
        }

        public void setCommentnumber(int commentnumber) {
            this.commentnumber = commentnumber;
        }

        public int getAgainstnumber() {
            return againstnumber;
        }

        public void setAgainstnumber(int againstnumber) {
            this.againstnumber = againstnumber;
        }

        public int getSupportnumber() {
            return supportnumber;
        }

        public void setSupportnumber(int supportnumber) {
            this.supportnumber = supportnumber;
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            daynumber = in.readInt();
            collectnumber = in.readInt();
            commentnumber = in.readInt();
            againstnumber = in.readInt();
            supportnumber = in.readInt();
        }

        @Override
        public void write(DataOutput out) throws IOException {
            out.writeInt(daynumber);
            out.writeInt(collectnumber);
            out.writeInt(commentnumber);
            out.writeInt(againstnumber);
            out.writeInt(supportnumber);
        }

        @Override
        public int compareTo(Object o) {
            return 0; // ordering is never used for this value type
        }
    }
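
Since TVPlayData travels from map to reduce through Hadoop's Writable serialization, a quick way to sanity-check that write and readFields mirror each other is a round-trip through plain Java streams. A minimal sketch — the class name and sample values are invented for illustration:

    package com.dajiangtai.hadoop.tvplay;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Round-trip sanity check for TVPlayData's Writable implementation.
    public class TVPlayDataRoundTrip {
        public static void main(String[] args) throws IOException {
            TVPlayData out = new TVPlayData();
            out.set(100, 20, 30, 4, 50);

            // Serialize exactly as Hadoop would during the shuffle.
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            out.write(new DataOutputStream(buf));

            // Deserialize into a fresh object and confirm the fields survive.
            TVPlayData back = new TVPlayData();
            back.readFields(new DataInputStream(
                    new ByteArrayInputStream(buf.toByteArray())));
            System.out.println(back.getDaynumber() + " ... "
                    + back.getSupportnumber()); // prints: 100 ... 50
        }
    }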

TVPlayInputFormat.java

    package com.dajiangtai.hadoop.tvplay;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.apache.hadoop.util.LineReader;

    /**
     * @author yangjun
     * @function key/value input format
     */
    public class TVPlayInputFormat extends FileInputFormat<Text, TVPlayData> {

        @Override
        public RecordReader<Text, TVPlayData> createRecordReader(InputSplit input,
                TaskAttemptContext context) throws IOException, InterruptedException {
            return new TVPlayRecordReader();
        }

        public class TVPlayRecordReader extends RecordReader<Text, TVPlayData> {
            public LineReader in;
            public Text lineKey;
            public TVPlayData lineValue;
            public Text line;

            @Override
            public void close() throws IOException {
                if (in != null) {
                    in.close();
                }
            }

            @Override
            public Text getCurrentKey() throws IOException, InterruptedException {
                return lineKey;
            }

            @Override
            public TVPlayData getCurrentValue() throws IOException, InterruptedException {
                return lineValue;
            }

            @Override
            public float getProgress() throws IOException, InterruptedException {
                return 0;
            }

            @Override
            public void initialize(InputSplit input, TaskAttemptContext context)
                    throws IOException, InterruptedException {
                FileSplit split = (FileSplit) input;
                Configuration job = context.getConfiguration();
                Path file = split.getPath();
                FileSystem fs = file.getFileSystem(job);

                FSDataInputStream filein = fs.open(file);
                in = new LineReader(filein, job);
                line = new Text();
                lineKey = new Text();
                lineValue = new TVPlayData();
            }

            @Override
            public boolean nextKeyValue() throws IOException, InterruptedException {
                int linesize = in.readLine(line);
                if (linesize == 0) return false;
                String[] pieces = line.toString().split("\t");
                if (pieces.length != 7) {
                    throw new IOException("Invalid record received");
                }
                // Key: tvname + "\t" + source; value: the five trailing counters.
                lineKey.set(pieces[0] + "\t" + pieces[1]);
                lineValue.set(Integer.parseInt(pieces[2]), Integer.parseInt(pieces[3]),
                        Integer.parseInt(pieces[4]), Integer.parseInt(pieces[5]),
                        Integer.parseInt(pieces[6]));
                return true;
            }
        }
    }
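
For reference, nextKeyValue expects every input line to carry seven tab-separated fields. Judging from how lineKey and lineValue are assembled above, the order is: tvname, source, daynumber, collectnumber, commentnumber, againstnumber, supportnumber — this field order is an inference, and the sample line below is hypothetical:

    // Hypothetical input line, parsed the same way nextKeyValue does.
    public class ParseSample {
        public static void main(String[] args) {
            String line = "someshow\t1\t5000000\t12000\t8000\t300\t9000";
            String[] pieces = line.split("\t");
            // key   -> "someshow\t1"  (tvname + "\t" + source)
            // value -> TVPlayData(5000000, 12000, 8000, 300, 9000)
            System.out.println("key = " + pieces[0] + "\\t" + pieces[1]
                    + ", fields = " + pieces.length);
        }
    }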

First, start the 3-node cluster.

Then connect to HDFS on the locally built 3-node cluster.
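
Before running the job it is worth confirming that the development machine can actually reach that HDFS. A minimal sketch (class name is mine), assuming the same NameNode address hdfs://master:9000 and input directory that main() uses:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Connectivity check: list the job's input directory on the cluster NameNode.
    public class HdfsCheck {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(URI.create("hdfs://master:9000"),
                    new Configuration());
            for (FileStatus st : fs.listStatus(new Path("/tvplay/"))) {
                System.out.println(st.getPath());
            }
        }
    }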

The run output shown in the terminal — the program completes without errors:

    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - session.id is deprecated. Instead, use dfs.metrics.session-id
    -- ::, INFO [org.apache.hadoop.metrics.jvm.JvmMetrics] - Initializing JVM Metrics with processName=JobTracker, sessionId=
    -- ::, WARN [org.apache.hadoop.mapreduce.JobSubmitter] - Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
    -- ::, WARN [org.apache.hadoop.mapreduce.JobSubmitter] - No job jar file set. User classes may not be found. See Job or Job#setJar(String).
    -- ::, INFO [org.apache.hadoop.mapreduce.lib.input.FileInputFormat] - Total input paths to process :
    -- ::, INFO [org.apache.hadoop.mapreduce.JobSubmitter] - number of splits:
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - user.name is deprecated. Instead, use mapreduce.job.user.name
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.output.value.class is deprecated. Instead, use mapreduce.job.output.value.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.mapoutput.value.class is deprecated. Instead, use mapreduce.map.output.value.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapreduce.map.class is deprecated. Instead, use mapreduce.job.map.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.job.name is deprecated. Instead, use mapreduce.job.name
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapreduce.reduce.class is deprecated. Instead, use mapreduce.job.reduce.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapreduce.inputformat.class is deprecated. Instead, use mapreduce.job.inputformat.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.input.dir is deprecated. Instead, use mapreduce.input.fileinputformat.inputdir
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.map.tasks is deprecated. Instead, use mapreduce.job.maps
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.output.key.class is deprecated. Instead, use mapreduce.job.output.key.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.mapoutput.key.class is deprecated. Instead, use mapreduce.map.output.key.class
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.working.dir is deprecated. Instead, use mapreduce.job.working.dir
    -- ::, INFO [org.apache.hadoop.mapreduce.JobSubmitter] - Submitting tokens for job: job_local300699497_0001
    -- ::, WARN [org.apache.hadoop.conf.Configuration] - file:/tmp/hadoop-Administrator/mapred/staging/Administrator300699497/.staging/job_local300699497_0001/job.xml:an attempt to override final parameter: mapreduce.job.end-notification.max.retry.interval; Ignoring.
    -- ::, WARN [org.apache.hadoop.conf.Configuration] - file:/tmp/hadoop-Administrator/mapred/staging/Administrator300699497/.staging/job_local300699497_0001/job.xml:an attempt to override final parameter: mapreduce.job.end-notification.max.attempts; Ignoring.
    -- ::, WARN [org.apache.hadoop.conf.Configuration] - file:/tmp/hadoop-Administrator/mapred/local/localRunner/Administrator/job_local300699497_0001/job_local300699497_0001.xml:an attempt to override final parameter: mapreduce.job.end-notification.max.retry.interval; Ignoring.
    -- ::, WARN [org.apache.hadoop.conf.Configuration] - file:/tmp/hadoop-Administrator/mapred/local/localRunner/Administrator/job_local300699497_0001/job_local300699497_0001.xml:an attempt to override final parameter: mapreduce.job.end-notification.max.attempts; Ignoring.
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - The url to track the job: http://localhost:8080/
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - Running job: job_local300699497_0001
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - OutputCommitter set in config null
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - OutputCommitter is org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - Waiting for map tasks
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - Starting task: attempt_local300699497_0001_m_000000_0
    -- ::, INFO [org.apache.hadoop.yarn.util.ProcfsBasedProcessTree] - ProcfsBasedProcessTree currently is supported only on Linux.
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@1b9156ad
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - Processing split: hdfs://master:9000/tvplay/tvplay.txt:0+10833923
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - Job job_local300699497_0001 running in uber mode : false
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - map % reduce %
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - Map output collector class = org.apache.hadoop.mapred.MapTask$MapOutputBuffer
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - (EQUATOR) kvi ()
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - mapreduce.task.io.sort.mb:
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - soft limit at
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - bufstart = ; bufvoid =
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - kvstart = ; length =
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] -
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - Starting flush of map output
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - Spilling map output
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - bufstart = ; bufend = ; bufvoid =
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - kvstart = (); kvend = (); length = /
    -- ::, INFO [org.apache.hadoop.mapred.MapTask] - Finished spill
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Task:attempt_local300699497_0001_m_000000_0 is done. And is in the process of committing
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - map
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Task 'attempt_local300699497_0001_m_000000_0' done.
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - Finishing task: attempt_local300699497_0001_m_000000_0
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - Map task executor complete.
    -- ::, INFO [org.apache.hadoop.yarn.util.ProcfsBasedProcessTree] - ProcfsBasedProcessTree currently is supported only on Linux.
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@fba110e
    -- ::, INFO [org.apache.hadoop.mapred.Merger] - Merging sorted segments
    -- ::, INFO [org.apache.hadoop.mapred.Merger] - Down to the last merge-pass, with segments left of total size: bytes
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] -
    -- ::, INFO [org.apache.hadoop.conf.Configuration.deprecation] - mapred.skip.on is deprecated. Instead, use mapreduce.job.skiprecords
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - map % reduce %
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Task:attempt_local300699497_0001_r_000000_0 is done. And is in the process of committing
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] -
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Task attempt_local300699497_0001_r_000000_0 is allowed to commit now
    -- ::, INFO [org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter] - Saved output of task 'attempt_local300699497_0001_r_000000_0' to hdfs://master:9000/tvplay/out/_temporary/0/task_local300699497_0001_r_000000
    -- ::, INFO [org.apache.hadoop.mapred.LocalJobRunner] - reduce > reduce
    -- ::, INFO [org.apache.hadoop.mapred.Task] - Task 'attempt_local300699497_0001_r_000000_0' done.
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - map % reduce %
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - Job job_local300699497_0001 completed successfully
    -- ::, INFO [org.apache.hadoop.mapreduce.Job] - Counters:
        File System Counters
            FILE: Number of bytes read=
            FILE: Number of bytes written=
            FILE: Number of read operations=
            FILE: Number of large read operations=
            FILE: Number of write operations=
            HDFS: Number of bytes read=
            HDFS: Number of bytes written=
            HDFS: Number of read operations=
            HDFS: Number of large read operations=
            HDFS: Number of write operations=
        Map-Reduce Framework
            Map input records=
            Map output records=
            Map output bytes=
            Map output materialized bytes=
            Input split bytes=
            Combine input records=
            Combine output records=
            Reduce input groups=
            Reduce shuffle bytes=
            Reduce input records=
            Reduce output records=
            Spilled Records=
            Shuffled Maps =
            Failed Shuffles=
            Merged Map outputs=
            GC time elapsed (ms)=
            CPU time spent (ms)=
            Physical memory (bytes) snapshot=
            Virtual memory (bytes) snapshot=
            Total committed heap usage (bytes)=
        File Input Format Counters
            Bytes Read=
        File Output Format Counters
            Bytes Written=

View the output results on HDFS.
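
With MultipleOutputs, each named output lands in the output directory as files such as youku-r-00000, souhu-r-00000, and so on, alongside the default part-r-00000. A small sketch (class name is mine) that prints every result file, using the same paths as main():

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Print all result files in the output directory, skipping bookkeeping
    // files such as _SUCCESS.
    public class PrintResults {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(URI.create("hdfs://master:9000"),
                    new Configuration());
            for (FileStatus st : fs.listStatus(new Path("/tvplay/out"))) {
                if (st.isFile() && !st.getPath().getName().startsWith("_")) {
                    System.out.println("== " + st.getPath().getName() + " ==");
                    BufferedReader r = new BufferedReader(
                            new InputStreamReader(fs.open(st.getPath()), "UTF-8"));
                    String s;
                    while ((s = r.readLine()) != null) {
                        System.out.println(s);
                    }
                    r.close();
                }
            }
        }
    }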
