Developing and Running Your First Mahout Program

The code:

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package chen.test.kmeans;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.canopy.CanopyDriver;
import org.apache.mahout.clustering.conversion.InputDriver;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.clustering.kmeans.RandomSeedGenerator;
import org.apache.mahout.common.AbstractJob;
import org.apache.mahout.common.ClassUtils;
import org.apache.mahout.common.HadoopUtil;
import org.apache.mahout.common.commandline.DefaultOptionCreator;
import org.apache.mahout.common.distance.DistanceMeasure;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.common.distance.SquaredEuclideanDistanceMeasure;
import org.apache.mahout.utils.clustering.ClusterDumper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class TwoJob extends AbstractJob {

  private static final Logger log = LoggerFactory.getLogger(TwoJob.class);

  private static final String DIRECTORY_CONTAINING_CONVERTED_INPUT = "data";

  private TwoJob() {
  }

  public static void main(String[] args) throws Exception {
    if (args.length > 0) {
      log.info("Running with only user-supplied arguments");
      ToolRunner.run(new Configuration(), new TwoJob(), args);
    } else {
      log.info("Running with default arguments");
      Path output = new Path("output");
      Configuration conf = new Configuration();
      HadoopUtil.delete(conf, output);
      run(conf, new Path("testdata"), output, new EuclideanDistanceMeasure(), 2, 0.5, 10);
    }
  }

  @Override
  public int run(String[] args) throws Exception {
    addInputOption();
    addOutputOption();
    addOption(DefaultOptionCreator.distanceMeasureOption().create());
    addOption(DefaultOptionCreator.numClustersOption().create());
    addOption(DefaultOptionCreator.t1Option().create());
    addOption(DefaultOptionCreator.t2Option().create());
    addOption(DefaultOptionCreator.convergenceOption().create());
    addOption(DefaultOptionCreator.maxIterationsOption().create());
    addOption(DefaultOptionCreator.overwriteOption().create());

    Map<String,List<String>> argMap = parseArguments(args);
    if (argMap == null) {
      return -1;
    }

    Path input = getInputPath();
    Path output = getOutputPath();
    String measureClass = getOption(DefaultOptionCreator.DISTANCE_MEASURE_OPTION);
    if (measureClass == null) {
      measureClass = SquaredEuclideanDistanceMeasure.class.getName();
    }
    double convergenceDelta = Double.parseDouble(getOption(DefaultOptionCreator.CONVERGENCE_DELTA_OPTION));
    int maxIterations = Integer.parseInt(getOption(DefaultOptionCreator.MAX_ITERATIONS_OPTION));
    if (hasOption(DefaultOptionCreator.OVERWRITE_OPTION)) {
      HadoopUtil.delete(getConf(), output);
    }
    DistanceMeasure measure = ClassUtils.instantiateAs(measureClass, DistanceMeasure.class);
    if (hasOption(DefaultOptionCreator.NUM_CLUSTERS_OPTION)) {
      int k = Integer.parseInt(getOption(DefaultOptionCreator.NUM_CLUSTERS_OPTION));
      run(getConf(), input, output, measure, k, convergenceDelta, maxIterations);
    } else {
      double t1 = Double.parseDouble(getOption(DefaultOptionCreator.T1_OPTION));
      double t2 = Double.parseDouble(getOption(DefaultOptionCreator.T2_OPTION));
      run(getConf(), input, output, measure, t1, t2, convergenceDelta, maxIterations);
    }
    return 0;
  }

  /**
   * Run the kmeans clustering job on an input dataset using the given number of clusters k and iteration
   * parameters. All output data will be written to the output directory, which will be initially deleted if it
   * exists. The clustered points will reside in the path <output>/clusteredPoints. By default, the job expects a
   * file containing equal-length, space-delimited data that resides in a directory named "testdata", and writes
   * output to a directory named "output".
   *
   * @param conf the Configuration to use
   * @param input the Path of the input directory
   * @param output the Path of the output directory
   * @param measure the DistanceMeasure to use
   * @param k the number of clusters in KMeans
   * @param convergenceDelta the double convergence criterion for iterations
   * @param maxIterations the int maximum number of iterations
   */
  public static void run(Configuration conf, Path input, Path output, DistanceMeasure measure, int k,
      double convergenceDelta, int maxIterations) throws Exception {
    Path directoryContainingConvertedInput = new Path(output, DIRECTORY_CONTAINING_CONVERTED_INPUT);
    log.info("Preparing Input");
    InputDriver.runJob(input, directoryContainingConvertedInput, "org.apache.mahout.math.RandomAccessSparseVector");
    log.info("Running random seed to get initial clusters");
    Path clusters = new Path(output, "random-seeds");
    clusters = RandomSeedGenerator.buildRandom(conf, directoryContainingConvertedInput, clusters, k, measure);
    log.info("Running KMeans with k = {}", k);
    KMeansDriver.run(conf, directoryContainingConvertedInput, clusters, output, convergenceDelta,
        maxIterations, true, 0.0, false);
    // run ClusterDumper
    Path outGlob = new Path(output, "clusters-*-final");
    Path clusteredPoints = new Path(output, "clusteredPoints");
    log.info("Dumping out clusters from clusters: {} and clusteredPoints: {}", outGlob, clusteredPoints);
    ClusterDumper clusterDumper = new ClusterDumper(outGlob, clusteredPoints);
    // print the result
    clusterDumper.printClusters(null);
  }

  /**
   * Run the kmeans clustering job on an input dataset using the given distance measure, t1, t2 and iteration
   * parameters. All output data will be written to the output directory, which will be initially deleted if it
   * exists. The clustered points will reside in the path <output>/clusteredPoints. By default, the job expects a
   * file containing synthetic_control.data, as obtained from
   * http://archive.ics.uci.edu/ml/datasets/Synthetic+Control+Chart+Time+Series, to reside in a directory named
   * "testdata", and writes output to a directory named "output".
   *
   * @param conf the Configuration to use
   * @param input the Path of the input directory
   * @param output the Path of the output directory
   * @param measure the DistanceMeasure to use
   * @param t1 the canopy T1 threshold
   * @param t2 the canopy T2 threshold
   * @param convergenceDelta the double convergence criterion for iterations
   * @param maxIterations the int maximum number of iterations
   */
  public static void run(Configuration conf, Path input, Path output, DistanceMeasure measure, double t1, double t2,
      double convergenceDelta, int maxIterations) throws Exception {
    Path directoryContainingConvertedInput = new Path(output, DIRECTORY_CONTAINING_CONVERTED_INPUT);
    log.info("Preparing Input");
    InputDriver.runJob(input, directoryContainingConvertedInput, "org.apache.mahout.math.RandomAccessSparseVector");
    log.info("Running Canopy to get initial clusters");
    Path canopyOutput = new Path(output, "canopies");
    CanopyDriver.run(new Configuration(), directoryContainingConvertedInput, canopyOutput, measure, t1, t2, false, 0.0,
        false);
    log.info("Running KMeans");
    KMeansDriver.run(conf, directoryContainingConvertedInput, new Path(canopyOutput, Cluster.INITIAL_CLUSTERS_DIR
        + "-final"), output, convergenceDelta, maxIterations, true, 0.0, false);
    // run ClusterDumper
    ClusterDumper clusterDumper = new ClusterDumper(new Path(output, "clusters-*-final"), new Path(output,
        "clusteredPoints"));
    clusterDumper.printClusters(null);
  }
}

The code above is the example from the previous post; it uses k-means to perform the clustering.
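
For reference, InputDriver.runJob reads plain-text input in which every line is one point: the same count of space-delimited numeric values on each line (synthetic_control.data has 60 values per line). Two illustrative rows might look like this (the values here are only an example):

28.7812 34.4632 31.3381 31.2834 28.9207 ...
24.8923 25.7410 27.5532 32.8217 27.8789 ...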

The build.xml:

<project name="mahout_test" default="jar">

   <property name="root.dir" value="." />
<property name="src.dir" value="${root.dir}/src" />
<property name="lib.dir" value="${root.dir}/lib" />
<property name="build.dir" value="${root.dir}/build" /> <target name="clean" depends="">
<echo>root = ${root.dir}</echo>
<delete dir="${build.dir}" /> <mkdir dir="${build.dir}" /> </target> <target name="build" depends="clean">
<javac fork="true" debug="true" srcdir="${src.dir}" destdir="${build.dir}">
<classpath>
<fileset dir="${lib.dir}" includes="*.jar" />
</classpath>
</javac> </target> <target name="jar" depends="build">
<mkdir dir="${build.dir}/lib" />
<!--
<copy file="${lib.dir}/mahout-core-0.9.jar" todir="${build.dir}/lib" />
<copy file="${lib.dir}/mahout-integration-0.9.jar" todir="${build.dir}/lib" />
<copy file="${lib.dir}/hadoop-core-1.2.1.jar" todir="${build.dir}/lib" />
--> <copy file="${lib.dir}/mahout-examples-0.9-job.jar" todir="${build.dir}/lib" />
<!--
<copy file="${lib.dir}/mahout-integration-0.9.jar" todir="${build.dir}/lib" />
-->
<jar destfile="${root.dir}/mahout_test.jar" basedir="${build.dir}" >
<manifest>
<!--
<attribute name="Main-Class" value="chen/test/Job" />
-->
</manifest>
</jar>
</target> </project>

Build command:

ant -f build.xml

After the build finishes, a file named mahout_test.jar is generated under ${root.dir}.
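
You can sanity-check the result with jar tf mahout_test.jar. Given the build.xml above, the listing should contain roughly:

chen/test/kmeans/TwoJob.class
lib/mahout-examples-0.9-job.jar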

Jars required to compile the program: mahout-core-0.9-job.jar, mahout-examples-0.9-job.jar, and hadoop-core-1.2.1.jar.

Of these, mahout-core-0.9-job.jar is only needed for the org.slf4j.Logger and org.slf4j.LoggerFactory classes,

so you can depend on slf4j-api-1.4.3.jar from the Hadoop lib directory instead of mahout-core-0.9-job.jar.

The key to packaging a Mahout program is to include mahout-examples-0.9-job.jar inside the generated jar; otherwise running it with hadoop jar **.jar will fail. (hadoop jar unpacks the jar into a temporary directory and puts everything under its lib/ directory on the classpath, which is why embedding the dependency this way works.)

<copy file="${lib.dir}/mahout-examples-0.9-job.jar" todir="${build.dir}/lib" />

Many of the classes in mahout-examples-0.9-job.jar overlap with the classes in mahout-core-0.9-job.jar, which is a real trap: if both jars are loaded at the same time, the job fails with errors saying the relevant classes cannot be found.

This problem plagued me for a long time.

Also, do not specify a Main-Class when building the jar, or the run will fail as well. I never dug into the exact reason; if you know it, please leave a comment. (One likely explanation: when the manifest declares a Main-Class, hadoop jar invokes that class and passes the class name you typed on the command line through as an ordinary program argument instead.)

Run command:

bin/hadoop jar /mnt/hgfs/mnt/chenfool/mahout_test.jar  chen.test.kmeans.TwoJob
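
Because TwoJob extends AbstractJob, you can also pass the options registered in run() explicitly instead of relying on the built-in defaults. A sketch using the long option names that DefaultOptionCreator defines (verify the exact names against your Mahout version; they are an assumption here):

bin/hadoop jar /mnt/hgfs/mnt/chenfool/mahout_test.jar chen.test.kmeans.TwoJob --input testdata --output output --numClusters 2 --convergenceDelta 0.5 --maxIter 10 --overwrite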

The runtime environment requirements are similar to the previous post's: the input vector file must already exist under /user/${user}/testdata in HDFS.
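
For example, assuming the data file is synthetic_control.data in the current directory (the file name here is illustrative), it could be staged with:

bin/hadoop fs -mkdir testdata
bin/hadoop fs -put synthetic_control.data testdata

After a successful run, the output directory holds the converted vectors in output/data, the initial clusters (output/random-seeds, or output/canopies for the Canopy-seeded overload), the per-iteration clusters-* directories ending with clusters-*-final, and the cluster assignments in output/clusteredPoints.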
