Three virtual machines running CentOS 6.5.

  127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
  ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
  192.168.59.130 m1
  192.168.59.131 s1
  192.168.59.132 s2

Modify the hostname

  [root@m1 hadoop]# cat /etc/sysconfig/network
  NETWORKING=yes
  HOSTNAME=m1
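
On CentOS 6 this file is only read at boot, so the new name takes effect after a reboot; as a small aside, you can also apply it to the running session (repeat on s1 and s2 with their own names):

  hostname m1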

Modify the host mappings

  [root@m1 hadoop]# cat /etc/hosts
  127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
  ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
  192.168.59.130 m1
  192.168.59.131 s1
  192.168.59.132 s2

Passwordless SSH login (note: every machine must be able to SSH into every other machine, itself included)

  ssh-keygen -t rsa
  ssh-copy-id -i ~/.ssh/id_rsa.pub s1   # repeat for m1 and s2 as well
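
A minimal sketch of the whole exchange, assuming the three hostnames above and that ssh-copy-id is available on every node; run it on each of the three machines so every host can reach every other, itself included:

  # generate a key pair once per node (accept the defaults)
  ssh-keygen -t rsa
  # push the public key to every node in the cluster, including this one
  for h in m1 s1 s2; do
      ssh-copy-id -i ~/.ssh/id_rsa.pub "$h"
  done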

Install the JDK

  See: http://www.cnblogs.com/xiaojf/p/6568426.html

Install Hadoop 2.7.3

Extract and rename

  [root@m1 soft]# ll
  hadoop
  jar
  jdk
  kafka
  scala-2.11.
  tmp
  zookeeper-3.4.
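
The extract-and-rename commands themselves are not shown above; a minimal sketch, assuming the stock hadoop-2.7.3.tar.gz release archive already sits in /usr/local/soft:

  cd /usr/local/soft
  tar -zxf hadoop-2.7.3.tar.gz    # unpack the release
  mv hadoop-2.7.3 hadoop          # rename to the short path used throughout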

Create the directories for temporary files, NameNode metadata, and DataNode data

  mkdir -p /usr/local/soft/tmp/hadoop/tmp
  mkdir -p /usr/local/soft/tmp/hadoop/dfs/name
  mkdir -p /usr/local/soft/tmp/hadoop/dfs/data
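
These directories are needed on every node, not just m1. A sketch that creates them remotely over the passwordless SSH configured earlier:

  for h in m1 s1 s2; do
      ssh "$h" 'mkdir -p /usr/local/soft/tmp/hadoop/{tmp,dfs/name,dfs/data}'
  done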

Modify the configuration files

  [root@m1 soft]# cd /usr/local/soft/hadoop/etc/hadoop/
  [root@m1 hadoop]# ll
  capacity-scheduler.xml
  configuration.xsl
  container-executor.cfg
  core-site.xml
  hadoop-env.cmd
  hadoop-env.sh
  hadoop-metrics2.properties
  hadoop-metrics.properties
  hadoop-policy.xml
  hdfs-site.xml
  httpfs-env.sh
  httpfs-log4j.properties
  httpfs-signature.secret
  httpfs-site.xml
  kms-acls.xml
  kms-env.sh
  kms-log4j.properties
  kms-site.xml
  log4j.properties
  mapred-env.cmd
  mapred-env.sh
  mapred-queues.xml.template
  mapred-site.xml.template
  slaves
  ssl-client.xml.example
  ssl-server.xml.example
  yarn-env.cmd
  yarn-env.sh
  yarn-site.xml

yarn-env.sh

  [root@m1 hadoop]# vi yarn-env.sh
  # ... Apache license header omitted ...
  # The java implementation to use.
  export JAVA_HOME=/usr/local/soft/jdk

slaves

  [root@m1 hadoop]# vi slaves
  s1
  s2

core-site.xml

  <configuration>
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://m1:9000</value>
    </property>
    <property>
      <name>io.file.buffer.size</name>
      <!-- supply a buffer size; the stock default is 4096, and 131072 is a common choice -->
      <value></value>
    </property>
    <property>
      <name>hadoop.tmp.dir</name>
      <value>file:/usr/local/soft/tmp/hadoop/tmp</value>
      <description>A base for other temporary directories.</description>
    </property>
  </configuration>

hdfs-site.xml

  <configuration>
    <property>
      <name>dfs.namenode.secondary.http-address</name>
      <value>m1:9001</value>
    </property>
    <property>
      <!-- pointed at the directories created earlier -->
      <name>dfs.namenode.name.dir</name>
      <value>file:/usr/local/soft/tmp/hadoop/dfs/name</value>
    </property>
    <property>
      <name>dfs.datanode.data.dir</name>
      <value>file:/usr/local/soft/tmp/hadoop/dfs/data</value>
    </property>
    <property>
      <name>dfs.replication</name>
      <value>2</value>
    </property>
    <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
    </property>
  </configuration>

mapred-site.xml
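
Hadoop ships this file only as mapred-site.xml.template (see the listing above), so create the real file before editing:

  cp mapred-site.xml.template mapred-site.xml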

  <configuration>
    <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
    </property>
    <property>
      <name>mapreduce.jobhistory.address</name>
      <!-- supply a port; the stock default is 10020 -->
      <value>m1:</value>
    </property>
    <property>
      <name>mapreduce.jobhistory.webapp.address</name>
      <!-- supply a port; the stock default is 19888 -->
      <value>m1:</value>
    </property>
  </configuration>

yarn-site.xml

  <configuration>
    <!-- Site specific YARN configuration properties -->
    <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
    </property>
    <property>
      <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
      <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
      <name>yarn.resourcemanager.address</name>
      <!-- supply a port; the stock default is 8032 -->
      <value>m1:</value>
    </property>
    <property>
      <name>yarn.resourcemanager.scheduler.address</name>
      <!-- supply a port; the stock default is 8030 -->
      <value>m1:</value>
    </property>
    <property>
      <name>yarn.resourcemanager.resource-tracker.address</name>
      <!-- supply a port; the stock default is 8031 -->
      <value>m1:</value>
    </property>
    <property>
      <name>yarn.resourcemanager.admin.address</name>
      <!-- supply a port; the stock default is 8033 -->
      <value>m1:</value>
    </property>
    <property>
      <name>yarn.resourcemanager.webapp.address</name>
      <!-- supply a port; the stock default is 8088 -->
      <value>m1:</value>
    </property>
  </configuration>

Set the Hadoop environment variables

  export HADOOP_HOME=/usr/local/soft/hadoop
  export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
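
A sketch of making these stick across shells by appending them to /etc/profile (assuming a system-wide profile is acceptable for this setup):

  echo 'export HADOOP_HOME=/usr/local/soft/hadoop' >> /etc/profile
  echo 'export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin' >> /etc/profile
  source /etc/profile   # pick up the change in the current shell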

Distribute the code

  [root@m1 soft]# scp -r hadoop root@s2:/usr/local/soft/
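
Only the copy to s2 is shown; s1 needs the same tree (and, if you edited /etc/profile, the same environment). A hedged sketch covering both slaves:

  for h in s1 s2; do
      scp -r /usr/local/soft/hadoop root@"$h":/usr/local/soft/
  done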

Format the NameNode

  [root@m1 soft]# hadoop namenode -format
  DEPRECATED: Use of this script to execute hdfs command is deprecated.
  Instead use the hdfs command for it.

  INFO namenode.NameNode: STARTUP_MSG:
  /************************************************************
  STARTUP_MSG: Starting NameNode
  STARTUP_MSG:   host = m1/192.168.59.130
  STARTUP_MSG:   args = [-format]
  STARTUP_MSG:   version = 2.7.3
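
Format only once. If a re-format ever becomes necessary, first clear the name, data, and tmp directories on every node; otherwise the DataNodes will be rejected for carrying a stale cluster ID. A cautious sketch, using the paths created earlier:

  for h in m1 s1 s2; do
      ssh "$h" 'rm -rf /usr/local/soft/tmp/hadoop/dfs/* /usr/local/soft/tmp/hadoop/tmp/*'
  done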

Start the cluster

  [root@m1 soft]# start-all.sh
  This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
  Starting namenodes on [m1]
  m1: starting namenode, logging to /usr/local/soft/hadoop/logs/hadoop-root-namenode-m1.out
  s1: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s1.out
  s2: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s2.out
  Starting secondary namenodes [master]
  master: ssh: Could not resolve hostname master: Name or service not known
  starting yarn daemons
  starting resourcemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-resourcemanager-m1.out
  s1: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s1.out
  s2: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s2.out
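
The "Could not resolve hostname master" line means the secondary NameNode address resolved to master, a host missing from /etc/hosts; with dfs.namenode.secondary.http-address set to m1:9001 as above, a clean run should print [m1] instead, so double-check that the edited hdfs-site.xml actually took effect. Since start-all.sh is deprecated anyway, a sketch of the recommended sequence plus a quick per-node daemon check:

  start-dfs.sh    # NameNode + SecondaryNameNode on m1, DataNodes on s1/s2
  start-yarn.sh   # ResourceManager on m1, NodeManagers on s1/s2
  for h in m1 s1 s2; do ssh "$h" jps; done   # list the running Java daemons on each node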

Verify

  [root@m1 soft]# hadoop dfs -ls /
  DEPRECATED: Use of this script to execute hdfs command is deprecated.
  Instead use the hdfs command for it.

  [root@m1 soft]# hadoop dfs -mkdir /xiaojf
  DEPRECATED: Use of this script to execute hdfs command is deprecated.
  Instead use the hdfs command for it.

  [root@m1 soft]# hadoop dfs -ls /
  DEPRECATED: Use of this script to execute hdfs command is deprecated.
  Instead use the hdfs command for it.

  Found 1 items
  drwxr-xr-x   - root supergroup          0 /xiaojf
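
A couple of extra hedged checks: hdfs dfsadmin -report should count two live DataNodes, and the stock web UI ports in Hadoop 2.x are 50070 (HDFS NameNode) and 8088 (YARN ResourceManager) unless overridden:

  hdfs dfsadmin -report | grep 'Live datanodes'   # expect: Live datanodes (2)
  # Web UIs on the default ports: http://m1:50070 (HDFS) and http://m1:8088 (YARN)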

Done.
