一:配置基础环境

一、修改主机名

修改 master 机器主机名

[root@server ~]# hostnamectl set-hostname master-wzg
[root@server ~]# bash
[root@master-wzg ~]# hostname
master-wzg

修改 slave1 机器主机名

[root@client ~]# hostnamectl set-hostname slave1-wzg
[root@client ~]# bash
[root@slave1-wzg ~]# hostname
slave1-wzg

修改 slave2 机器主机名

[root@localhost ~]# hostnamectl set-hostname slave2-wzg
[root@localhost ~]# bash
[root@slave2-wzg ~]# hostname
slave2-wzg

二、配置网络环境

master-wzg的IP为10.10.10.128

slave1-wzg的IP为10.10.10.129

slave2-wzg的IP为10.10.10.130

子网掩码均为255.255.255.0

所有网关均为10.10.10.2

DNS均设置为114.114.114.114

master节点:(slave1和slave2的IP为10.10.10.129和10.10.10.130)

[root@server ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=da1a701d-8cee-4e1d-9423-56280232e595
DEVICE=ens33
ONBOOT=yes
IPADDR=10.10.10.128
PREFIX=24
GATEWAY=10.10.10.2
DNS1=114.114.114.114
[root@server ~]# systemctl restart network

查看 master IP地址

[root@master-wzg ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:af:2f:d2 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.128/24 brd 10.10.10.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::9ef7:e697:cc63:418b/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:08:d4:17 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:08:d4:17 brd ff:ff:ff:ff:ff:ff

查看 slave1 IP地址

[root@slave1-wzg ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:91:3d:e2 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.129/24 brd 10.10.10.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::8f79:feb9:1325:f537/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:58:7d:55 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:58:7d:55 brd ff:ff:ff:ff:ff:ff

查看 slave2 IP地址

[root@slave2-wzg ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2a:a9:80 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.130/24 brd 10.10.10.255 scope global ens32
valid_lft forever preferred_lft forever
inet6 fe80::2e7b:ba70:8834:5425/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:5a:cb:78 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:5a:cb:78 brd ff:ff:ff:ff:ff:ff
[root@slave2-wzg ~]#

三、配置域名解析

分别修改“/etc/hosts”配置文件

10.10.10.128 master-wzg master.example.com

10.10.10.129 slave1-wzg slave1.example.com

10.10.10.130 slave2-wzg slave2.example.com

master节点:

[root@master-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com

slave1节点:

[root@slave1-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com

slave2节点:

[root@slave2-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com

配置完成后,各节点之间可以相互ping通

[root@master-wzg ~]# ping master-wzg
PING master-wzg (10.10.10.128) 56(84) bytes of data.
64 bytes from master-wzg (10.10.10.128): icmp_seq=1 ttl=64 time=0.038 ms
64 bytes from master-wzg (10.10.10.128): icmp_seq=2 ttl=64 time=0.029 ms
^C
--- master-wzg ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.029/0.033/0.038/0.007 ms
[root@master-wzg ~]# ping slave1-wzg
PING slave1-wzg (10.10.10.129) 56(84) bytes of data.
64 bytes from slave1-wzg (10.10.10.129): icmp_seq=1 ttl=64 time=1.12 ms
64 bytes from slave1-wzg (10.10.10.129): icmp_seq=2 ttl=64 time=0.451 ms
^C
--- slave1-wzg ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.451/0.787/1.123/0.336 ms
[root@master-wzg ~]# ping slave2-wzg
PING slave2-wzg (10.10.10.130) 56(84) bytes of data.
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=1 ttl=64 time=1.28 ms
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=2 ttl=64 time=0.903 ms
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=3 ttl=64 time=0.489 ms
^C
--- slave2-wzg ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2004ms
rtt min/avg/max/mdev = 0.489/0.891/1.282/0.324 ms

二:SSH 无密码验证配置

一、生成 SSH 密钥,并配置自我免密登录

步骤一、配置 SSH 服务配置文件

使用 root 用户登录,修改 SSH 配置文件"/etc/ssh/sshd_config"的内容,需要将PubkeyAuthentication yes前面的#号删除,启用公钥私钥配对认证方式。设置完后需要重启 SSH 服务,才能使配置生效。

(在所有节点上执行)

[root@master-wzg ~]# vi /etc/ssh/sshd_config
PubkeyAuthentication yes
[root@master-wzg ~]# systemctl restart sshd

步骤二、创建 hadoop 用户

(在所有节点上执行)

[root@master-wzg ~]# useradd hadoop
[root@master-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.

步骤三、切换 hadoop 用户,生成密钥对

(在所有节点上执行)

[root@master-wzg ~]# su - hadoop
[hadoop@master-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
cc:31:89:1c:69:69:6b:8c:5b:b8:61:64:12:f5:7a:62 hadoop@master-wzg
The key's randomart image is:
+--[ RSA 2048]----+
| .o. .o |
| . oo=o . |
| + *+.+ |
| =.=o o |
| .E*. S |
| .oo |
| |
| |
| |
+-----------------+

查看.ssh目录下是否有两个刚生成的无密码密钥对

[hadoop@master-wzg ~]$ cd ~/.ssh/
[hadoop@master-wzg .ssh]$ ll
total 8
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop 399 Mar 19 12:05 id_rsa.pub

步骤四、将 id_rsa.pub 追加到授权 key 文件中

(在所有节点上执行)

[hadoop@master-wzg .ssh]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@master-wzg .ssh]$ ll ~/.ssh/
total 12
-rw-rw-r--. 1 hadoop hadoop 399 Mar 19 12:07 authorized_keys
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop 399 Mar 19 12:05 id_rsa.pub

步骤五、修改文件"authorized_keys"权限

(在所有节点上执行)

修改后 authorized_keys 文件的权限为600,表示所有者可读写,其他用户没有访问权限。如果该文件权限太大,ssh 服务会拒绝工作,出现无法通过密钥文件进行登录认证的情况。

[hadoop@master-wzg .ssh]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@master-wzg .ssh]$ ll ~/.ssh/
total 12
-rw-------. 1 hadoop hadoop 399 Mar 19 12:07 authorized_keys
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop 399 Mar 19 12:05 id_rsa.pub

步骤六、验证 SSH 登录本机

通过ssh localhost命令,在 hadoop 用户下验证能否嵌套登录本机,若可以不输入密码登录,则本机通过密钥登录认证成功。

首次登录时会提示系统无法确认 host 主机的真实性,只知道它的公钥指纹,询问用户是否还想继续连接。需要输入“yes”,表示继续登录。第二次再登录同一个主机,则不会再出现该提示。

(在所有节点上执行)

[hadoop@master-wzg .ssh]$ cd
[hadoop@master-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:30 2022
[hadoop@master-wzg ~]$ exit
logout
Connection to localhost closed.

slave1节点上做相同操作

[root@slave1-wzg ~]# systemctl restart sshd
[root@slave1-wzg ~]# useradd hadoop
[root@slave1-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1-wzg ~]# su - hadoop
[hadoop@slave1-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
c5:cb:e1:e4:20:4f:9a:d9:4f:42:d4:0b:a2:b1:8d:53 hadoop@slave1-wzg
The key's randomart image is:
+--[ RSA 2048]----+
| .. |
| . E.... |
| B..+.=. |
| = .X B.o |
| .+ S * |
| + |
| . |
| |
| |
+-----------------+
[hadoop@slave1-wzg ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ cd
[hadoop@slave1-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:33 2022
[hadoop@slave1-wzg ~]$ exit
logout
Connection to localhost closed.

slave2节点上做相同操作

[root@slave2-wzg ~]# systemctl restart sshd
[root@slave2-wzg ~]# useradd hadoop
[root@slave2-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
更改用户 hadoop 的密码 。
passwd:所有的身份验证令牌已经成功更新。
[root@slave2-wzg ~]# su - hadoop
[hadoop@slave2-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
88:77:d5:c6:d6:09:3c:3e:e4:95:55:4a:41:f0:f0:63 hadoop@slave2-wzg
The key's randomart image is:
+--[ RSA 2048]----+
| .+o+++|
| o+Boo |
| .+=oE |
| . . . o+. . |
| . o S . |
| . . |
| |
| |
| |
+-----------------+
[hadoop@slave2-wzg ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$
[hadoop@slave2-wzg ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:35 2022
[hadoop@slave2-wzg ~]$ exit
登出
Connection to localhost closed.

二:配置master免密登录slave1和slave2节点

步骤一、将Master节点的公钥复制到每个Slave节点

hadoop 用户登录,通过 scp 命令实现密钥拷贝。

首次远程连接时系统会询问用户是否要继续连接。需要输入“yes”,表示继续。因为目前尚未完成密钥认证的配置,所以使用 scp 命令拷贝文件需要输入 slave1 节点 hadoop用户的密码。

(master节点)

[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave1-wzg:~/
The authenticity of host 'slave1-wzg (10.10.10.129)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave1-wzg,10.10.10.129' (ECDSA) to the list of known hosts.
hadoop@slave1-wzg's password:
id_rsa.pub 100% 399 0.4KB/s 00:00
[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave2:~/
ssh: Could not resolve hostname slave2: Name or service not known
lost connection
[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave2-wzg:~/
The authenticity of host 'slave2-wzg (10.10.10.130)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave2-wzg,10.10.10.130' (ECDSA) to the list of known hosts.
hadoop@slave2-wzg's password:
id_rsa.pub 100% 399 0.4KB/s 00:00

步骤二、在Slave节点将公钥复制到文件

在每个 Slave 节点把 Master 节点复制的公钥复制到 authorized_keys 文件,并删除 id_rsa.pub 文件。hadoop 用户登录 slave1 和 slave2 节点,执行命令。

(slave1和slave2节点)

[hadoop@slave1-wzg ~]$ cat ~/id_rsa.pub >>~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ rm -f ~/id_rsa.pub
[hadoop@slave2-wzg ~]$ cat ~/id_rsa.pub >>~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$ rm -f ~/id_rsa.pub

步骤三、验证Master到每个Slave节点无密码登录

hadoop 用户登录 master 节点,执行 SSH 命令登录 slave1 和 slave2 节点。可以观察到不需要输入密码即可实现 SSH 登录。

(master节点)

[hadoop@master-wzg ~]$ ssh slave1-wzg
Last login: Sat Mar 19 12:09:53 2022 from localhost
[hadoop@slave1-wzg ~]$ exit
logout
Connection to slave1-wzg closed.
[hadoop@master-wzg ~]$ ssh slave2-wzg
Last login: Sat Mar 19 12:09:57 2022 from localhost
[hadoop@slave2-wzg ~]$ exit
logout
Connection to slave2-wzg closed.

三:配置slave节点免密登录master和对方

步骤一、将 Slave1和Slave2 节点的公钥保存到Master

使用 ssh-copy-id hadoop@master-wzg 一条命令更方便

(slave1节点)

[hadoop@slave1-wzg ~]$ ssh-copy-id hadoop@master-wzg
The authenticity of host 'master-wzg (10.10.10.128)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@master-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@master-wzg'"
and check to make sure that only the key(s) you wanted were added.

[hadoop@slave1-wzg ~]$ ssh-copy-id hadoop@slave2-wzg
The authenticity of host 'slave2-wzg (10.10.10.130)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@slave2-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@slave2-wzg'"
and check to make sure that only the key(s) you wanted were added.

(slave2节点)

[hadoop@slave2-wzg ~]$ ssh-copy-id hadoop@master-wzg
The authenticity of host 'master-wzg (10.10.10.128)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@master-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@master-wzg'"
and check to make sure that only the key(s) you wanted were added.

[hadoop@slave2-wzg ~]$ ssh-copy-id hadoop@slave1-wzg
The authenticity of host 'slave1-wzg (10.10.10.129)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@slave1-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@slave1-wzg'"
and check to make sure that only the key(s) you wanted were added.

步骤二、查看各节点的公钥

查看 Master 节点 authorized_keys 文件

[hadoop@master-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
[hadoop@master-wzg ~]$

查看 Slave1 节点 authorized_keys 文件

[hadoop@slave1-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
[hadoop@slave1-wzg ~]$

查看 Slave2 节点 authorized_keys 文件

[hadoop@slave2-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
[hadoop@slave2-wzg ~]$

可以看到每个节点 authorized_keys 文件中包括 master、slave1、slave2 三个节点的公钥。

步骤三、验证每个Slave节点无密码登录Master和对方

验证 Slave1 节点到 Master 节点无密码登录

[hadoop@slave1-wzg ~]$ ssh master-wzg
Last login: Sat Mar 19 12:09:48 2022 from localhost
[hadoop@master-wzg ~]$ exit
logout
Connection to master-wzg closed.
[hadoop@slave1-wzg ~]$ ssh slave2-wzg
Last login: Sat Mar 19 12:13:40 2022 from master-wzg
[hadoop@slave2-wzg ~]$ exit
logout
Connection to slave2-wzg closed.

验证 Slave2 节点到 Master 节点无密码登录

[hadoop@slave2-wzg ~]$ ssh master-wzg
Last login: Sat Mar 19 12:15:40 2022 from slave1-wzg
[hadoop@master-wzg ~]$ exit
登出
Connection to master-wzg closed.
[hadoop@slave2-wzg ~]$ ssh slave1-wzg
Last login: Sat Mar 19 12:13:30 2022 from master-wzg
[hadoop@slave1-wzg ~]$ exit
登出
Connection to slave1-wzg closed.

即可实现三台节点(Master 、Slave1、Slave2 )相互免密登录

声明:未经许可,不得转载

hadoop-SSH免密登录配置的更多相关文章

  1. SSH免密登录配置

    SSH免密登录配置 本地生成密钥文件: $ ssh-keygen 输出: Generating public/private rsa key pair. Enter file in which to ...

  2. (11)ssh免密登录配置

    ***在Linux命令行中登录到另一台虚拟机(需要用到ssh协议) Linux中默认有ssh的服务器端和客户端,客户端的名字就叫ssh 前提是当前使用的用户名在待连接的虚拟机中存在 格式:  ssh ...

  3. Cmder下ssh免密登录配置

    1.本地生成ssh-key 在本地cmder终端下运行下面的命令生成ssh的公钥和私钥文件: ssh-keygen -t rsa 其中,.ssh/id_rsa为私钥文件,留在本地使用,而.ssh/id ...

  4. Linux主机之间ssh免密登录配置方法

    由于公司的生产环境有很多台Linux的CentOS服务器, 为了方便机子(假设两台机子A,B)互相之间免密ssh, scp命令操作,配置如下 1. 在A.B上分别创建本机的公钥和私钥,输入命令后连续三 ...

  5. ssh免密登录配置后,登陆失败问题

    本文转自博主:_Lance 本文转自:https://blog.csdn.net/qq_19648191/article/details/54845440 相关资料http://blog.csdn.n ...

  6. ssh免密登录配置方法

    方法一 1.#ssh-keygen -t rsa 在客户端生成密钥对 把公钥拷贝给要登录的目标主机, 目标主机上将这个公钥加入到授权列表 #cat id_rsa.pub >> author ...

  7. SSH免密登录详解

    SSH免密登录详解 SSH(Security Shell)安全外壳协议,是较为可靠的,专为远程登录会话和其他网络服务提供安全保证的协议. ​ 对于传统的网络服务程序(例如,FTP,Telnet等)来说 ...

  8. 配置ssh免密登录遇到的问题——使用VMware多虚拟机搭建Hadoop集群

    搭建环境: 虚拟机 VMware12Pro      操作系统  centos6.8        hadoop 1.2.1 1.导入镜像文件,添加java环境 1.查看当前系统中安装的java,ls ...

  9. Ubuntu如何配置SSH免密登录

    前言 在搭建hadoop集群时,需要主机和副机之间实现SSH免密登录 一.环境准备 1.ubuntu两台 二.安装SSH 1.首先检测一下本机有没有安装SSH服务,如果没有任何打印说明未安装 sudo ...

  10. hadoop(八)集群namenode启动ssh免密登录(完全分布式五)|10

    前置章节:hadoop集群配置同步(hadoop完全分布式四)|10 启动namenode之前: 1. 先查看有无节点启动,执行jps查看,有的话停掉 [shaozhiqi@hadoop102 ~]$ ...

随机推荐

  1. svn使用规范、在Windows下使用svn命令行工具、svn命令行的解释

    以前在公司一直使用git,现在公司有用svn,一时间还真的不知道如何下手,在网上搜寻了很多大神和官网文档的指导,总结了下面一份教程,希望能够帮助大家快速上手,如果想更细致的了解相关内容,可以点击每个小 ...

  2. Category基本概念

    1.什么是Category Category有很多种翻译: 分类 \ 类别 \ 类目 (一般叫分类) Category是OC特有的语法, 其他语言没有的语法 Category的作用 可以在不修改原来类 ...

  3. 1Appium Desktop 的简单应用

    由于Appium Desktop出来了,所以使用appium要比以前简单许多,现在根据以前的文章针对Appium Desktop做下修改更新 之前文章链接:https://testerhome.com ...

  4. 在VMware上安装Linux虚拟机

    1.新建虚拟机 2.选择典型安装 3.点击稍后安装操作系统 4.选择类型和版本 5.选择一个英文路径 6. 7.调整硬件 8. 9. 10.选择第一项 11.选择中文 12.选择最小安装 13. 14 ...

  5. 在win10上安装face_recognition(人脸识别)

    github上有个项目face_recognition,是用于人脸识别的 主要是window上安装这个项目会繁琐些,linux上据项目文档上介绍是妥妥的. 项目地址:  https://github. ...

  6. Java中的Unsafe在安全领域的一些应用总结和复现

    目录 0 前言 1 基本使用 1.1 内存级别修改值 1.2 创建对象 1.3 创建VM Anonymous Class 2 利用姿势 2.1 修改值以关闭RASP等防御措施 2.2 创建Native ...

  7. 移动BI应该怎么规划?每一个数据产品经理必看

    在移动化.大数据浪潮的今天,基于数据做决策应该是每一家公司的标配:每家公司都有专门负责数据的人,也都应该有一个BI部门. 而移动BI,基于手机端随时随地进行数据查询和分析--更是BI中不可或缺的一部分 ...

  8. 现在的BI软件是不是很贵?

    目前一个企业光有现在狭义的拖拉拽自助 BI 够用吗?那明显是不够的!那么企业应该需要什么样的BI系统? 一个很多企业真正需要的 BI 解决方案一般有一下几类: 1.数据呈现 这是最关键也最基本的功能, ...

  9. .Net Core之选项模式Options使用

    一.简要阐述 ASP.NET Core引入了Options模式,使用类来表示相关的设置组.简单的来说,就是用强类型的类来表达配置项,这带来了很多好处.利用了系统的依赖注入,并且还可以利用配置系统.它使 ...

  10. 【C#表达式树 一】Expressions 命名空间 38个类 2个接口 3个枚举

    注解 抽象类 Expression 提供用于为表达式树建模的类层次结构的根. 此命名空间中派生自的类 Expression (例如 MemberExpression 和 ParameterExpres ...