1. Install Docker

Install Docker on the three hosts 21, 22, and 200:

~]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

1.1 Configure Docker

/etc/docker/daemon.json

{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}

Notes:

exec-opts: the cgroup driver used for CPU/memory resource management (systemd here)

registry-mirrors: the registry mirror used when pulling images

insecure-registries: registries that are trusted over plain HTTP

bip must be adjusted per host:

21: 172.7.21.1/24

22: 172.7.22.1/24

200: 172.7.200.1/24
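If you manage more than a couple of hosts, the per-host bip value can be derived instead of edited by hand. A minimal sketch, assuming the hostname suffix (e.g. cfzx55-21 → 21) matches the desired third octet:

#!/bin/bash
# Hypothetical helper: derive the docker bip from this host's number.
HOST_ID=$(hostname -s | grep -oE '[0-9]+$')   # e.g. cfzx55-21 -> 21
sed -i "s#\"bip\": \".*\"#\"bip\": \"172.7.${HOST_ID}.1/24\"#" /etc/docker/daemon.json
grep bip /etc/docker/daemon.json              # verify before (re)starting docker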

Create the data directory

[root@hdss7-21 ~]# mkdir -pv /data/docker
mkdir: created directory ‘/data’
mkdir: created directory ‘/data/docker’
[root@hdss7-21 ~]#

1.2 Start Docker

~]# systemctl enable docker
~]# systemctl start docker
~]# systemctl status docker -l
~]# docker info
~]# docker version

2. Deploy the kube-apiserver cluster

2.1 Cluster plan

Hostname            Role                                  IP
CFZX55-21.host.com  kube-apiserver                        10.211.55.21
CFZX55-22.host.com  kube-apiserver                        10.211.55.22
CFZX55-11.host.com  L4 load balancer (Nginx+Keepalived)   10.211.55.11
CFZX55-12.host.com  L4 load balancer (Nginx+Keepalived)   10.211.55.12

Note: 10.211.55.11 and 10.211.55.12 run nginx as a layer-4 load balancer, with keepalived providing a VIP, 10.211.55.10, that fronts the two kube-apiservers for high availability.

2.2 Download the software, unpack it, and create symlinks

Operate on host 21.

This example uses Kubernetes v1.23.4.

[root@cfzx55-21 src]# tar xf kubernetes-server-linux-amd64.tar.gz -C /opt/
[root@cfzx55-21 src]# cd /opt/kubernetes
[root@cfzx55-21 kubernetes]# rm kubernetes-src.tar.gz -f
[root@cfzx55-21 kubernetes]# rm -rf LICENSES/
[root@cfzx55-21 kubernetes]# rm -rf addons/
[root@cfzx55-21 kubernetes]# cd server/
[root@cfzx55-21 server]# mv bin/ ../
[root@cfzx55-21 server]# cd ..
[root@cfzx55-21 kubernetes]# rm -rf server/
[root@cfzx55-21 kubernetes]# cd bin/
[root@cfzx55-21 bin]# rm *.tar -f
[root@cfzx55-21 bin]# rm *_tag -f
[root@cfzx55-21 opt]# vim /etc/profile
export PATH=$PATH:/opt/etcd:/opt/kubernetes/bin
[root@cfzx55-21 opt]# source /etc/profile
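With the bin directory on PATH, a quick sanity check that the binaries resolve (output omitted here):

~]# kube-apiserver --version
~]# kubectl version --client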

2.3 Issue the kube-apiserver certificate

Operate on host 200.

/opt/certs/kube-apiserver-csr.json

{
  "CN": "kube-apiserver",
  "hosts": [
    "127.0.0.1",
    "192.168.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "10.211.55.10",
    "10.211.55.21",
    "10.211.55.22",
    "10.211.55.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}

Notes:

  • CN: K8S extracts the CN field of the certificate as the username. Concretely, this corresponds to a subject with "subjects.kind: User" in K8S RoleBinding/ClusterRoleBinding resources.
  • hosts: must include the IPs of all Master nodes, the LB nodes and the LB cluster VIP, plus the first IP of the Service ClusterIP range. The ClusterIP range is set by "--service-cluster-ip-range" (192.168.0.0/16 here), so its first IP, 192.168.0.1, is listed.
  • names
    • C: CN
    • ST:
    • L:
    • O: "system:masters". The reason for setting "O": the apiserver reuses this certificate when it initiates requests to kubelets (see the official documentation). K8S extracts the "O" field as the group, corresponding to a subject with "subjects.kind: Group" in RoleBinding/ClusterRoleBinding resources (both fields can be checked with the openssl sketch below).
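Once the certificate has been signed (next step), the CN/O mapping and the SAN list baked into it can be double-checked on host 200 with standard openssl, for example:

~]# openssl x509 -in kube-apiserver.pem -noout -subject
~]# openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'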

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-apiserver-csr.json | cfssl-json -bare kube-apiserver
2022/03/12 21:28:43 [INFO] generate received request
2022/03/12 21:28:43 [INFO] received CSR
2022/03/12 21:28:43 [INFO] generating key: rsa-2048
2022/03/12 21:28:43 [INFO] encoded CSR
2022/03/12 21:28:43 [INFO] signed certificate with serial number 218531774642654852589087643914770351081106577228
[root@cfzx55-200 certs]# ll kube-apiserver*
-rw-r--r-- 1 root root 636 Mar 12 21:27 kube-apiserver-csr.json
-rw------- 1 root root 1679 Mar 12 21:28 kube-apiserver-key.pem
-rw-r--r-- 1 root root 1289 Mar 12 21:28 kube-apiserver.csr
-rw-r--r-- 1 root root 1655 Mar 12 21:28 kube-apiserver.pem
[root@cfzx55-200 certs]#

2.4 Copy the certificates to the compute nodes

Operate on host 21: copy the six certificate and key files from host 200 into the certs directory (the scp sketch after the listing shows one way).

[root@cfzx55-21 certs]# pwd
/opt/kubernetes/bin/certs
[root@cfzx55-21 certs]# ll
total 24
-rw------- 1 root root 1679 Mar 12 21:32 ca-key.pem
-rw-r--r-- 1 root root 1310 Mar 12 21:32 ca.pem
-rw------- 1 root root 1675 Mar 12 21:32 etcd-key.pem
-rw-r--r-- 1 root root 1448 Mar 12 21:32 etcd.pem
-rw------- 1 root root 1679 Mar 12 21:32 kube-apiserver-key.pem
-rw-r--r-- 1 root root 1655 Mar 12 21:32 kube-apiserver.pem
[root@cfzx55-21 certs]#
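The copy itself can be done from host 200 with scp; a sketch that assumes the six files still sit in /opt/certs there:

[root@cfzx55-200 certs]# scp ca.pem ca-key.pem etcd.pem etcd-key.pem \
    kube-apiserver.pem kube-apiserver-key.pem \
    root@cfzx55-21:/opt/kubernetes/bin/certs/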

2.5 Generate the token.csv file

This file is used when worker nodes join the cluster: the kubelet authenticates to kube-apiserver with this bootstrap token to request a signed certificate. Each line of the static token file has the format token,user,uid,"group".

/opt/kubernetes/bin/certs/kube-apiserver.token.csv

[root@cfzx55-21 certs]# head -c 16 /dev/urandom | od -An -t x | tr -d " "
cceb7b589306a60ab6afe922f1f32d50
[root@cfzx55-21 certs]# echo cceb7b589306a60ab6afe922f1f32d50,kubelet-bootstrap,10001,"system:kubelet-bootstrap" > kube-apiserver.token.csv
[root@cfzx55-21 certs]# cat kube-apiserver.token.csv
cceb7b589306a60ab6afe922f1f32d50,kubelet-bootstrap,10001,system:kubelet-bootstrap
[root@cfzx55-21 certs]#

2.6 Create the startup script

Operate on host 21 and create the startup script:

/opt/kubernetes/bin/kube-apiserver-startup.sh

#!/bin/bash
./kube-apiserver \
  --runtime-config=api/all=true \
  --anonymous-auth=false \
  --bind-address=0.0.0.0 \
  --advertise-address=10.211.55.21 \
  --secure-port=6443 \
  --tls-cert-file=./certs/kube-apiserver.pem \
  --tls-private-key-file=./certs/kube-apiserver-key.pem \
  --client-ca-file=./certs/ca.pem \
  --etcd-cafile=./certs/ca.pem \
  --etcd-certfile=./certs/etcd.pem \
  --etcd-keyfile=./certs/etcd-key.pem \
  --etcd-servers=https://10.211.55.12:2379,https://10.211.55.21:2379,https://10.211.55.22:2379 \
  --kubelet-client-certificate=./certs/kube-apiserver.pem \
  --kubelet-client-key=./certs/kube-apiserver-key.pem \
  --service-account-key-file=./certs/ca.pem \
  --service-account-signing-key-file=./certs/ca-key.pem \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --enable-bootstrap-token-auth=true \
  --token-auth-file=./certs/kube-apiserver.token.csv \
  --allow-privileged=true \
  --service-cluster-ip-range=192.168.0.0/16 \
  --service-node-port-range=8000-20000 \
  --authorization-mode=RBAC,Node \
  --enable-aggregator-routing=true \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
  --v=2 \
  --audit-log-path=/data/logs/kubernetes/kube-apiserver/audit-log \
  --log-dir=/data/logs/kubernetes/kube-apiserver

2.7 Adjust permissions and create directories

[root@cfzx55-21 bin]# chmod +x kube-apiserver-startup.sh
[root@cfzx55-21 bin]# mkdir -pv /data/logs/kubernetes/kube-apiserver

2.8 Create the supervisor configuration

/etc/supervisord.d/kube-apiserver.ini

[program:kube-apiserver-55-21]
command=/opt/kubernetes/bin/kube-apiserver-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

2.9 Start the service and verify

[root@cfzx55-21 bin]# supervisorctl update
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21 RUNNING pid 12536, uptime 2:29:07
kube-apiserver-55-21 RUNNING pid 13122, uptime 0:00:40
[root@cfzx55-21 bin]# netstat -luntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 13123/./kube-apiser
tcp 0 0 10.211.55.21:2379 0.0.0.0:* LISTEN 12537/./etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 12537/./etcd
tcp 0 0 10.211.55.21:2380 0.0.0.0:* LISTEN 12537/./etcd
tcp 0 0 10.211.55.21:2381 0.0.0.0:* LISTEN 12537/./etcd
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 912/sshd
udp 0 0 127.0.0.1:323 0.0.0.0:* 700/chronyd
[root@cfzx55-21 bin]#

2.10 Install and deploy the remaining nodes

Deploy node 22

# On node 21, copy the kubernetes directory to node 22
[root@cfzx55-21 ~]# scp -r /opt/kubernetes/ root@cfzx55-22:/opt/
[root@cfzx55-21 ~]# scp /etc/supervisord.d/kube-apiserver.ini root@cfzx55-22:/etc/supervisord.d/
# On node 22, create the log directory
[root@cfzx55-22 certs]# mkdir -pv /data/logs/kubernetes/kube-apiserver
# Edit the IP address in kube-apiserver-startup.sh
# Edit the program name in /etc/supervisord.d/kube-apiserver.ini
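# For example (hypothetical sed one-liners -- check both files before starting):
[root@cfzx55-22 bin]# sed -i 's/--advertise-address=10.211.55.21/--advertise-address=10.211.55.22/' /opt/kubernetes/bin/kube-apiserver-startup.sh
[root@cfzx55-22 bin]# sed -i 's/55-21/55-22/' /etc/supervisord.d/kube-apiserver.ini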
# Start the service
[root@cfzx55-22 bin]# supervisorctl update
[root@cfzx55-22 bin]# supervisorctl status
etcd-server-55-22 RUNNING pid 12495, uptime 2:37:27
kube-apiserver-55-22 RUNNING pid 12675, uptime 0:00:38
[root@cfzx55-22 bin]# netstat -luntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 12676/./kube-apiser
tcp 0 0 10.211.55.22:2379 0.0.0.0:* LISTEN 12496/./etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 12496/./etcd
tcp 0 0 10.211.55.22:2380 0.0.0.0:* LISTEN 12496/./etcd
tcp 0 0 10.211.55.22:2381 0.0.0.0:* LISTEN 12496/./etcd
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 914/sshd
udp 0 0 127.0.0.1:323 0.0.0.0:* 704/chronyd
[root@cfzx55-22 bin]#

Check the cluster status

[root@cfzx55-21 ~]# curl --insecure https://10.211.55.21:6443/
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "Unauthorized",
"reason": "Unauthorized",
"code": 401
}[root@cfzx55-21 ~]# curl --insecure https://10.211.55.22:6443/
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "Unauthorized",
"reason": "Unauthorized",
"code": 401
}[root@cfzx55-21 ~]#

At this point, the kube-apiserver installation is complete. The 401 Unauthorized responses above are expected: --anonymous-auth=false rejects unauthenticated requests, so getting a 401 actually confirms the apiservers are up and serving TLS correctly.

2.11 Configure the layer-4 reverse proxy

The ports kube-apiserver is listening on:

[root@cfzx55-21 ~]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 13123/./kube-apiser
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 12676/./kube-apiser

Run a VIP, 10.211.55.10, with keepalived.

Port 7443 on 10.211.55.10 reverse-proxies port 6443 on 10.211.55.21 and 10.211.55.22.

The following operations are performed on hosts 11 and 12.

Install nginx

~]# yum install nginx -y
~]# yum install nginx-mod-stream -y

Configure nginx

Append the following to the end of /etc/nginx/nginx.conf, i.e. as a sibling of (after) the http block.

stream {
    upstream kube-apiserver {
        server 10.211.55.21:6443 max_fails=3 fail_timeout=30s;
        server 10.211.55.22:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

[root@cfzx55-11 ~]# nginx -t
nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:85
nginx: configuration file /etc/nginx/nginx.conf test failed

The error above is what you get when the nginx stream module is missing; with the nginx-mod-stream package installed (as above), nginx -t passes.
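If in doubt, confirm that the dynamic module is actually present before re-running the test. The paths below are the usual ones for the CentOS nginx-mod-stream package; adjust if yours differ:

~]# ls /usr/lib64/nginx/modules/ngx_stream_module.so
~]# cat /usr/share/nginx/modules/mod-stream.conf
~]# nginx -t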

Start nginx

~]# systemctl start nginx
~]# systemctl enable nginx
~]# systemctl status nginx

Check the status

[root@cfzx55-11 ~]# netstat -luntp | grep nginx
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 1499/nginx: master
tcp 0 0 0.0.0.0:7443 0.0.0.0:* LISTEN 1499/nginx: master
[root@cfzx55-11 ~]#

Install keepalived

[root@cfzx55-11 ~]# yum install keepalived -y
[root@cfzx55-12 ~]# yum install keepalived -y

Create the port-check script

/etc/keepalived/check_port.sh

#!/bin/bash
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    PORT_PROCESS=`ss -lnt | grep $CHK_PORT | wc -l`
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used, End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
fi

Make it executable

~]# chmod +x /etc/keepalived/check_port.sh

Test the script

[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh
Check Port Cant Be Empty!
[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh 7443
[root@cfzx55-11 ~]# echo $?
0
[root@cfzx55-11 ~]# /etc/keepalived/check_port.sh 7445
Port 7445 Is Not Used, End.
[root@cfzx55-11 ~]# echo $?
1
[root@cfzx55-11 ~]#

Keepalived master configuration

Operate on host 11.

/etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 10.211.55.11
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.211.55.11
    ! nopreempt: non-preemptive mode. When the master's service goes down the
    ! VIP floats to the backup; when the master comes back up, the VIP stays
    ! on the backup.
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.211.55.10
    }
}

Keepalived backup configuration

Operate on host 12.

/etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    router_id 10.211.55.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    priority 90
    advert_int 1
    mcast_src_ip 10.211.55.12
    ! Note: the backup must NOT have the nopreempt option
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.211.55.10
    }
}

2.12 Start the proxy and verify

Operate on host 11.

[root@cfzx55-11 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@cfzx55-11 ~]# systemctl start keepalived
[root@cfzx55-11 ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2022-03-13 08:21:53 CST; 6s ago
Process: 1580 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
Main PID: 1581 (keepalived)
CGroup: /system.slice/keepalived.service
├─1581 /usr/sbin/keepalived -D
├─1582 /usr/sbin/keepalived -D
└─1583 /usr/sbin/keepalived -D
Mar 13 08:21:53 cfzx55-11.host.com Keepalived_healthcheckers[1582]: Opening file '/etc/keepalive....
Mar 13 08:21:54 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Transition to MAS...TE
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Entering MASTER STATE
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) setting protocol VIPs.
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: VRRP_Instance(VI_1) Sending/queueing ...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Mar 13 08:21:55 cfzx55-11.host.com Keepalived_vrrp[1583]: Sending gratuitous ARP on eth0 for 10...10
Hint: Some lines were ellipsized, use -l to show in full.
[root@cfzx55-11 ~]#

Operate on host 12.

[root@cfzx55-12 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@cfzx55-12 ~]# systemctl start keepalived
[root@cfzx55-12 ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2022-03-13 08:22:29 CST; 7s ago
Process: 1538 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
Main PID: 1539 (keepalived)
CGroup: /system.slice/keepalived.service
├─1539 /usr/sbin/keepalived -D
├─1540 /usr/sbin/keepalived -D
└─1541 /usr/sbin/keepalived -D
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Registering gratuitous ARP shared channel
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Opening file '/etc/keepalived/keepali...'.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: WARNING - default user 'keepalived_sc...e.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Unable to access script `/etc/keepali...h`
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Disabling track script chk_nginx sinc...nd
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP_Instance(VI_1) removing protocol...s.
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: Using LinkWatch kernel netlink reflec.....
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP_Instance(VI_1) Entering BACKUP STATE
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_vrrp[1541]: VRRP sockpool: [ifindex(2), proto(112...)]
Mar 13 08:22:29 cfzx55-12.host.com Keepalived_healthcheckers[1540]: Opening file '/etc/keepalive....
Hint: Some lines were ellipsized, use -l to show in full.
[root@cfzx55-12 ~]#

On host 11, the VIP is visible with ip addr:

[root@cfzx55-11 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc
link/ether 00:1c:42:76:65:e1 brd ff:ff:ff:ff:ff:ff
inet 10.211.55.11/24 brd 10.211.55.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.211.55.10/32 scope global eth0
valid_lft forever preferred_lft forever
[root@cfzx55-11 ~]#

Nginx + Keepalived high-availability test

Stop nginx on host 11; the VIP disappears from it.

[root@cfzx55-11 ~]# systemctl stop nginx
[root@cfzx55-11 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc
link/ether 00:1c:42:76:65:e1 brd ff:ff:ff:ff:ff:ff
inet 10.211.55.11/24 brd 10.211.55.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
[root@cfzx55-11 ~]#

Checking on host 12, the VIP is now running there:

[root@cfzx55-12 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc
link/ether 00:1c:42:e2:45:7b brd ff:ff:ff:ff:ff:ff
inet 10.211.55.12/24 brd 10.211.55.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.211.55.10/32 scope global eth0
valid_lft forever preferred_lft forever
[root@cfzx55-12 ~]#

Restart nginx; the VIP stays on host 12.

This is because nopreempt is configured on 11 (non-preemptive mode): when the master's service goes down, the VIP floats to the backup, and when the master's service comes back up, the VIP remains on the backup.

To move the VIP back to host 11, restart keepalived (one option is sketched below).
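One way to do that, assuming nginx on 11 is healthy again: restarting keepalived on the backup releases the VIP, and the higher-priority master re-acquires it.

[root@cfzx55-11 ~]# systemctl start nginx
[root@cfzx55-12 ~]# systemctl restart keepalived
[root@cfzx55-11 ~]# ip addr show eth0 | grep 10.211.55.10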

3. Deploy the kubectl component

3.1 Cluster plan

Hostname            Role     IP
CFZX55-21.host.com  kubectl  10.211.55.21
CFZX55-22.host.com  kubectl  10.211.55.22

3.2 Issue the kubectl certificate

Operate on ops host 200.

Generate the kubectl certificate signing request (csr) file

/opt/certs/kubectl-csr.json

{
  "CN": "clusteradmin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}

Notes

  • CN: the CN value in the kubectl certificate carries no special meaning here; any value would do.
  • O: we want kubectl to have full cluster privileges, so O is set to "system:masters". K8S extracts the O field as the group, i.e. a subject with "subjects.kind: Group" in RoleBinding/ClusterRoleBinding resources.
  • kube-apiserver later authorizes client requests (kubelet, kube-proxy, Pods) with RBAC;

    kube-apiserver predefines some RBAC RoleBindings, e.g. cluster-admin binds the Group system:masters to the Role cluster-admin, which grants permission to call every kube-apiserver API;
  • O sets the certificate's Group to system:masters. When this certificate is presented to kube-apiserver, authentication succeeds because the certificate is CA-signed, and since the certificate's group is the pre-authorized system:masters, it is granted access to all APIs;
  • This certificate is later used to generate the administrator's kubeconfig file. RBAC is the generally recommended way to control roles and permissions in kubernetes, and kubernetes takes the certificate's CN field as the User and its O field as the Group;

    "O": "system:masters" must be exactly system:masters, otherwise the later kubectl create clusterrolebinding will fail (see the inspection sketch after this list).

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kubectl-csr.json | cfssl-json -bare kubectl
2022/03/13 08:48:36 [INFO] generate received request
2022/03/13 08:48:36 [INFO] received CSR
2022/03/13 08:48:36 [INFO] generating key: rsa-2048
2022/03/13 08:48:36 [INFO] encoded CSR
2022/03/13 08:48:36 [INFO] signed certificate with serial number 629903670193912591906490478447930251557864868755
2022/03/13 08:48:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@cfzx55-200 certs]# ll kubectl*
-rw-r--r-- 1 root root 1017 Mar 13 08:48 kubectl.csr
-rw-r--r-- 1 root root 306 Mar 13 08:44 kubectl-csr.json
-rw------- 1 root root 1679 Mar 13 08:48 kubectl-key.pem
-rw-r--r-- 1 root root 1411 Mar 13 08:48 kubectl.pem
[root@cfzx55-200 certs]#

3.3 Copy the certificates to hosts 21 and 22

[root@cfzx55-200 certs]# scp kubectl*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
[root@cfzx55-200 certs]# scp kubectl*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/

3.4 Generate the kubeconfig file

Generate kubectl's kubectl.kubeconfig file. It contains everything needed to reach kube-apiserver: the kube-apiserver address, the CA certificate, and the client certificate kubectl presents.

With this file, anyone can perform any operation on the K8S cluster as a super administrator from any machine, so keep it safe.

By default, kubectl reads its configuration from ~/.kube/config.

Perform the following on host 21, then copy the generated file to the remaining Master nodes (host 22 in this example).

Script that creates the config file

#!/bin/bash
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-credentials clusteradmin \
  --client-certificate=/opt/kubernetes/bin/certs/kubectl.pem \
  --client-key=/opt/kubernetes/bin/certs/kubectl-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-context default \
  --cluster=kubernetes \
  --user=clusteradmin \
  --kubeconfig=${KUBE_CONFIG}

kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Notes:

  • Cluster name: a label describing the cluster; it has no meaning outside the kubeconfig.
  • certificate-authority: the K8S cluster's root CA certificate.
  • server: points at the VIP address of the kube-apiserver load balancer.
  • kubeconfig: the kubeconfig file to generate.
  • User name: clusteradmin defines a user entry; inside the kubeconfig this user is associated with a certificate pair. The name itself means nothing to the K8S cluster; what actually matters are the O and CN fields inside the certificate.
  • client-certificate: the client certificate.
  • client-key: the client private key.
  • Context: default associates the "clusteradmin" user with the "kubernetes" cluster inside the kubeconfig (verified with the sketch below).
  • cluster: the cluster name configured by set-cluster.
  • user: the user name configured by set-credentials.
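To verify what the script wrote, kubectl can print the file back; --minify limits the output to the active context:

~]# kubectl config view --kubeconfig=/root/.kube/config --minify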

Run the script

[root@cfzx55-21 ~]# mkdir ~/.kube
[root@cfzx55-21 ~]# mkdir k8s-shell
[root@cfzx55-21 ~]# cd k8s-shell/
[root@cfzx55-21 k8s-shell]# vim kubectl-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kubectl-config.sh
[root@cfzx55-21 k8s-shell]# ./kubectl-config.sh
Cluster "kubernetes" set.
User "clusteradmin" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Check the cluster status (controller-manager and scheduler report Unhealthy because they have not been deployed yet):

[root@cfzx55-21 k8s-shell]# kubectl cluster-info
Kubernetes control plane is running at https://10.211.55.10:7443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@cfzx55-21 k8s-shell]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Unhealthy Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
scheduler Unhealthy Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
etcd-1 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
[root@cfzx55-21 k8s-shell]# kubectl get all -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 10h
[root@cfzx55-21 k8s-shell]#

Copy the kubeconfig file generated on host 21 to node 22

[root@cfzx55-22 ~]# mkdir ~/.kube
[root@cfzx55-22 ~]# scp root@cfzx55-21:/root/.kube/config ~/.kube/
[root@cfzx55-22 ~]# ll .kube/
total 8
-rw------- 1 root root 6224 Mar 13 09:42 config
[root@cfzx55-22 ~]#

Check the cluster status on 22

[root@cfzx55-22 ~]# kubectl cluster-info
Kubernetes control plane is running at https://10.211.55.10:7443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@cfzx55-22 ~]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Unhealthy Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
scheduler Unhealthy Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
etcd-2 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
[root@cfzx55-22 ~]# kubectl get all -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 11h
[root@cfzx55-22 ~]#

At this point, the kubectl deployment is complete.

4. Deploy controller-manager

4.1 Cluster plan

Hostname            Role                IP
CFZX55-21.host.com  controller-manager  10.211.55.21
CFZX55-22.host.com  controller-manager  10.211.55.22

4.2 Generate the kube-controller-manager certificate

Operate on ops host 200.

Generate the certificate request file

/opt/certs/kube-controller-manager-csr.json

{
  "CN": "system:kube-controller-manager",
  "hosts": [
    "127.0.0.1",
    "10.211.55.11",
    "10.211.55.12",
    "10.211.55.21",
    "10.211.55.22",
    "10.211.55.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}

Notes:

  • CN: this value is very important: whether kube-controller-manager can talk to kube-apiserver depends on it. K8S extracts the CN field as the username, i.e. a subject with "subjects.kind: User" in RoleBinding/ClusterRoleBinding resources.
  • hosts: the IP addresses of the nodes kube-controller-manager runs on.
  • O: no practical meaning here.
  • OU: no practical meaning here.
  • The hosts list contains every kube-controller-manager node IP;
  • Because CN is system:kube-controller-manager, the ClusterRoleBinding system:kube-controller-manager built into kubernetes grants kube-controller-manager the permissions it needs to do its work.

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-controller-manager-csr.json | cfssl-json -bare kube-controller-manager
2022/03/13 10:35:55 [INFO] generate received request
2022/03/13 10:35:55 [INFO] received CSR
2022/03/13 10:35:55 [INFO] generating key: rsa-2048
2022/03/13 10:35:55 [INFO] encoded CSR
2022/03/13 10:35:55 [INFO] signed certificate with serial number 386505557530275475753178134460007976778023939766
[root@cfzx55-200 certs]# ll kube-controller*.pem
-rw------- 1 root root 1679 Mar 13 10:35 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Mar 13 10:35 kube-controller-manager.pem
[root@cfzx55-200 certs]#

Copy the certificates to hosts 21 and 22

[root@cfzx55-200 certs]# scp kube-controller-manager*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
[root@cfzx55-200 certs]# scp kube-controller-manager*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/

4.3 Generate the kubeconfig file for kube-controller-manager

Config file path: /opt/kubernetes/cfg/

Script that generates the kubeconfig file

#!/bin/bash
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-credentials kube-controller-manager \
  --client-certificate=/opt/kubernetes/bin/certs/kube-controller-manager.pem \
  --client-key=/opt/kubernetes/bin/certs/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-controller-manager \
  --kubeconfig=${KUBE_CONFIG}

kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Generate the config file

[root@cfzx55-21 k8s-shell]# vim kube-controller-manager-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kube-controller-manager-config.sh
[root@cfzx55-21 k8s-shell]# ./kube-controller-manager-config.sh
Cluster "kubernetes" set.
User "kube-controller-manager" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Copy the generated config file to host 22.

[root@cfzx55-21 ~]# scp -r /opt/kubernetes/cfg/ root@cfzx55-22:/opt/kubernetes/
root@cfzx55-22's password:
kube-controller-manager.kubeconfig 100% 6366 2.6MB/s 00:00
[root@cfzx55-21 ~]#

Check on host 22

[root@cfzx55-22 ~]# ll /opt/kubernetes/cfg/
total 8
-rw------- 1 root root 6366 Mar 13 10:49 kube-controller-manager.kubeconfig
[root@cfzx55-22 ~]#

4.4 Create the startup script

Operate on host 21.

/opt/kubernetes/bin/kube-controller-manager-startup.sh

#!/bin/sh
./kube-controller-manager \
  --cluster-name=kubernetes \
  --bind-address=127.0.0.1 \
  --service-cluster-ip-range=192.168.0.0/16 \
  --leader-elect=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \
  --tls-cert-file=./certs/kube-controller-manager.pem \
  --tls-private-key-file=./certs/kube-controller-manager-key.pem \
  --cluster-signing-cert-file=./certs/ca.pem \
  --cluster-signing-key-file=./certs/ca-key.pem \
  --cluster-signing-duration=175200h0m0s \
  --use-service-account-credentials=true \
  --root-ca-file=./certs/ca.pem \
  --service-account-private-key-file=./certs/ca-key.pem \
  --log-dir=/data/logs/kubernetes/kube-controller-manager \
  --v=2

Notes:

Leave out the --secure-port=10252 flag; only then does the component status report healthy.
--cluster-cidr string
    CIDR range for Pods in the cluster. Requires --allocate-node-cidrs to be true.
In this example, the allocate-node-cidrs and cluster-cidr flags are left unset; the docker bip is used instead.

Create the script and adjust its permissions

[root@cfzx55-21 bin]# vim kube-controller-manager-startup.sh
[root@cfzx55-21 bin]# chmod +x kube-controller-manager-startup.sh
[root@cfzx55-21 bin]#

Create the log directory

[root@cfzx55-21 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager

4.5 Create the supervisor config file

/etc/supervisord.d/kube-controller-manager.ini

[program:kube-controller-manager-55-21]
command=/opt/kubernetes/bin/kube-controller-manager-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

4.6 Start it with supervisor

[root@cfzx55-21 bin]# supervisorctl start kube-controller-manager-55-21
kube-controller-manager-55-21: started
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21 RUNNING pid 1033, uptime 4:21:51
kube-apiserver-55-21 RUNNING pid 1034, uptime 4:21:51
kube-controller-manager-55-21 RUNNING pid 3330, uptime 0:00:37
[root@cfzx55-21 bin]#
[root@cfzx55-21 bin]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 1044/./kube-apiserv
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 3331/./kube-control
[root@cfzx55-21 bin]#
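As an extra liveness check beyond supervisorctl, the controller-manager's secure health endpoint (port 10257 by default in this version) should answer locally with ok; a sketch, where -k skips certificate verification:

[root@cfzx55-21 bin]# curl -sk https://127.0.0.1:10257/healthz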

4.7 Copy the startup script and the supervisor config file to host 22

[root@cfzx55-21 bin]# scp kube-controller-manager-startup.sh root@cfzx55-22:/opt/kubernetes/bin/
root@cfzx55-22's password:
kube-controller-manager-startup.sh 100% 778 489.1KB/s 00:00
[root@cfzx55-21 bin]# scp /etc/supervisord.d/kube-controller-manager.ini root@cfzx55-22:/etc/supervisord.d/
root@cfzx55-22's password:
kube-controller-manager.ini 100% 474 326.8KB/s 00:00
[root@cfzx55-21 bin]#

4.8 Start the service on host 22

# Edit the program name
[root@cfzx55-22 ~]# vim /etc/supervisord.d/kube-controller-manager.ini
[root@cfzx55-22 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@cfzx55-22 ~]# supervisorctl update
kube-controller-manager-55-21: added process group
[root@cfzx55-22 ~]# supervisorctl status
etcd-server-55-22 RUNNING pid 1013, uptime 4:27:39
kube-apiserver-55-22 RUNNING pid 1012, uptime 4:27:39
kube-controller-manager-55-21 RUNNING pid 3099, uptime 0:00:34
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 1014/./kube-apiserv
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 3100/./kube-control
[root@cfzx55-22 ~]#

5. Deploy kube-scheduler

5.1 Cluster plan

Hostname            Role            IP
CFZX55-21.host.com  kube-scheduler  10.211.55.21
CFZX55-22.host.com  kube-scheduler  10.211.55.22

5.2 Generate the kube-scheduler certificate

Create the certificate request (csr) file

/opt/certs/kube-scheduler-csr.json

{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "10.211.55.11",
    "10.211.55.12",
    "10.211.55.21",
    "10.211.55.22",
    "10.211.55.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}

Generate the certificate

[root@cfzx55-200 certs]# cfssl gencert \
> -ca=ca.pem \
> -ca-key=ca-key.pem \
> -config=ca-config.json \
> -profile=kubernetes \
> kube-scheduler-csr.json | cfssl-json -bare kube-scheduler
2022/03/13 12:30:21 [INFO] generate received request
2022/03/13 12:30:21 [INFO] received CSR
2022/03/13 12:30:21 [INFO] generating key: rsa-2048
2022/03/13 12:30:21 [INFO] encoded CSR
2022/03/13 12:30:21 [INFO] signed certificate with serial number 78101929142938232987965103781662806513424359272
[root@cfzx55-200 certs]# ll kube-scheduler*.pem
-rw------- 1 root root 1679 Mar 13 12:30 kube-scheduler-key.pem
-rw-r--r-- 1 root root 1489 Mar 13 12:30 kube-scheduler.pem
[root@cfzx55-200 certs]#

5.3 Copy the certificates to nodes 21 and 22

[root@cfzx55-200 certs]# scp kube-scheduler*.pem root@cfzx55-21:/opt/kubernetes/bin/certs/
root@cfzx55-21's password:
kube-scheduler-key.pem 100% 1679 957.6KB/s 00:00
kube-scheduler.pem 100% 1489 953.3KB/s 00:00
[root@cfzx55-200 certs]# scp kube-scheduler*.pem root@cfzx55-22:/opt/kubernetes/bin/certs/
root@cfzx55-22's password:
kube-scheduler-key.pem 100% 1679 640.6KB/s 00:00
kube-scheduler.pem 100% 1489 794.6KB/s 00:00
[root@cfzx55-200 certs]#

5.4 Generate the kubeconfig file

#!/bin/bash
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://10.211.55.10:7443"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/bin/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-credentials kube-scheduler \
  --client-certificate=/opt/kubernetes/bin/certs/kube-scheduler.pem \
  --client-key=/opt/kubernetes/bin/certs/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=${KUBE_CONFIG}

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-scheduler \
  --kubeconfig=${KUBE_CONFIG}

kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

Run the script

[root@cfzx55-21 k8s-shell]# vim kube-scheduler-config.sh
[root@cfzx55-21 k8s-shell]# chmod +x kube-scheduler-config.sh
[root@cfzx55-21 k8s-shell]# ./kube-scheduler-config.sh
Cluster "kubernetes" set.
User "kube-scheduler" set.
Context "default" created.
Switched to context "default".
[root@cfzx55-21 k8s-shell]#

Copy the kubeconfig file to host 22

[root@cfzx55-21 k8s-shell]# scp /opt/kubernetes/cfg/kube-scheduler.kubeconfig root@cfzx55-22:/opt/kubernetes/cfg/
root@cfzx55-22's password:
kube-scheduler.kubeconfig 100% 6332 2.6MB/s 00:00
[root@cfzx55-21 k8s-shell]#

5.5 Create the kube-scheduler startup script

/opt/kubernetes/bin/kube-scheduler-startup.sh

#!/bin/sh
./kube-scheduler \
  --address=127.0.0.1 \
  --leader-elect=true \
  --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \
  --log-dir=/data/logs/kubernetes/kube-scheduler \
  --v=2

Create the script and adjust its permissions

[root@cfzx55-21 bin]# vim kube-scheduler-startup.sh
[root@cfzx55-21 bin]# chmod +x kube-scheduler-startup.sh
[root@cfzx55-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler

5.6 Create the supervisor config file

/etc/supervisord.d/kube-scheduler.ini

[program:kube-scheduler-55-21]
command=/opt/kubernetes/bin/kube-scheduler-startup.sh
numprocs=1
directory=/opt/kubernetes/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

5.7 Start the kube-scheduler service

[root@cfzx55-21 bin]# supervisorctl update
[root@cfzx55-21 bin]# supervisorctl status
etcd-server-55-21 RUNNING pid 1033, uptime 5:16:26
kube-apiserver-55-21 RUNNING pid 1034, uptime 5:16:26
kube-controller-manager-55-21 RUNNING pid 3416, uptime 0:38:46
kube-scheduler-55-21 RUNNING pid 3486, uptime 0:00:32
[root@cfzx55-21 bin]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:10259 0.0.0.0:* LISTEN 3487/./kube-schedul
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 1044/./kube-apiserv
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 3417/./kube-control
[root@cfzx55-21 bin]#

5.8 Copy the kube-scheduler startup script and supervisor config file to host 22

[root@cfzx55-21 bin]# scp kube-scheduler-startup.sh root@cfzx55-22:/opt/kubernetes/bin/
root@cfzx55-22's password:
kube-scheduler-startup.sh 100% 199 100.3KB/s 00:00
[root@cfzx55-21 bin]#
[root@cfzx55-21 bin]# scp /etc/supervisord.d/kube-scheduler.ini root@cfzx55-22:/etc/supervisord.d/
root@cfzx55-22's password:
kube-scheduler.ini 100% 446 329.4KB/s 00:00
[root@cfzx55-21 bin]#

5.9 Start the kube-scheduler service on host 22

# Edit the name
[root@cfzx55-22 ~]# vim /etc/supervisord.d/kube-scheduler.ini
[root@cfzx55-22 ~]# supervisorctl update
kube-controller-manager-55-21: stopped
kube-controller-manager-55-21: removed process group
kube-controller-manager-55-22: added process group
kube-scheduler-55-22: added process group
[root@cfzx55-22 ~]#
[root@cfzx55-22 ~]# supervisorctl status
etcd-server-55-22 RUNNING pid 1013, uptime 5:25:59
kube-apiserver-55-22 RUNNING pid 1012, uptime 5:25:59
kube-controller-manager-55-22 RUNNING pid 3234, uptime 0:00:32
kube-scheduler-55-22 RUNNING pid 3187, uptime 0:03:19
[root@cfzx55-22 ~]# netstat -luntp | grep kube
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 1014/./kube-apiserv
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 3235/./kube-control
tcp 0 0 0.0.0.0:10259 0.0.0.0:* LISTEN 3189/./kube-schedul
[root@cfzx55-22 ~]#

When kube-controller-manager was deployed on 22 earlier, its program name had not been changed; the supervisorctl update above therefore removed the kube-controller-manager-55-21 group and re-added it as kube-controller-manager-55-22.

Check the cluster status

[root@cfzx55-22 bin]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
[root@cfzx55-22 bin]#

View the cluster resources

[root@cfzx55-21 ~]# kubectl get sa -A
NAMESPACE NAME SECRETS AGE
default default 1 114m
kube-node-lease default 1 114m
kube-public default 1 114m
kube-system attachdetach-controller 1 116m
kube-system bootstrap-signer 1 114m
kube-system certificate-controller 1 116m
kube-system clusterrole-aggregation-controller 1 114m
kube-system cronjob-controller 1 116m
kube-system daemon-set-controller 1 116m
kube-system default 1 114m
kube-system deployment-controller 1 116m
kube-system disruption-controller 1 116m
kube-system endpoint-controller 1 114m
kube-system endpointslice-controller 1 116m
kube-system endpointslicemirroring-controller 1 116m
kube-system ephemeral-volume-controller 1 116m
kube-system expand-controller 1 114m
kube-system generic-garbage-collector 1 114m
kube-system horizontal-pod-autoscaler 1 116m
kube-system job-controller 1 116m
kube-system namespace-controller 1 116m
kube-system node-controller 1 116m
kube-system persistent-volume-binder 1 114m
kube-system pod-garbage-collector 1 114m
kube-system pv-protection-controller 1 114m
kube-system pvc-protection-controller 1 114m
kube-system replicaset-controller 1 116m
kube-system replication-controller 1 114m
kube-system resourcequota-controller 1 114m
kube-system root-ca-cert-publisher 1 116m
kube-system service-account-controller 1 116m
kube-system service-controller 1 116m
kube-system statefulset-controller 1 116m
kube-system token-cleaner 1 114m
kube-system ttl-after-finished-controller 1 114m
kube-system ttl-controller 1 116m
[root@cfzx55-21 ~]# kubectl get ns -A
NAME STATUS AGE
default Active 15h
kube-node-lease Active 15h
kube-public Active 15h
kube-system Active 15h
[root@cfzx55-21 ~]# kubectl get role -A
NAMESPACE NAME CREATED AT
kube-public system:controller:bootstrap-signer 2022-03-12T14:36:17Z
kube-system extension-apiserver-authentication-reader 2022-03-12T14:36:16Z
kube-system system::leader-locking-kube-controller-manager 2022-03-12T14:36:16Z
kube-system system::leader-locking-kube-scheduler 2022-03-12T14:36:16Z
kube-system system:controller:bootstrap-signer 2022-03-12T14:36:16Z
kube-system system:controller:cloud-provider 2022-03-12T14:36:16Z
kube-system system:controller:token-cleaner 2022-03-12T14:36:16Z
[root@cfzx55-21 ~]#

At this point, the Master node deployment is complete.
