Environment

Preparation

Configure Ansible (run on the deploy host)

# ssh-keygen
# for i in 192.168.3.{21..28}; do ssh-copy-id -i ~/.ssh/id_rsa.pub $i; done
[root@deploy ~]# cat /etc/ansible/hosts
[etcd]
192.168.3.21
192.168.3.22
192.168.3.23
[k8s-master]
192.168.3.24
192.168.3.25
192.168.3.26
[k8s-worker]
192.168.3.27
192.168.3.28
[k8s:children]
k8s-master
k8s-worker

Optimize host configuration

Disable the firewall and SELinux

# ansible all -m shell -a "systemctl stop firewalld && systemctl disable firewalld"
# ansible all -m shell -a "sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config"

Adjust resource limits
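
The original does not list commands for this step; a minimal sketch that raises the open-file limit on all nodes (the 65535 value and the limits.d file name are assumptions, not from the original) could be:

# ansible k8s -m shell -a "echo '* soft nofile 65535' >> /etc/security/limits.d/k8s.conf"
# ansible k8s -m shell -a "echo '* hard nofile 65535' >> /etc/security/limits.d/k8s.conf"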

Disable swap

# ansible k8s -m shell -a "swapoff -a"
# ansible k8s -m shell -a "yes | cp /etc/fstab /etc/fstab_bak"
# ansible k8s -m shell -a "cat /etc/fstab_bak | grep -v swap > /etc/fstab"
# ansible k8s -m shell -a "echo vm.swappiness = 0 >> /etc/sysctl.d/k8s.conf"
# ansible k8s -m shell -a "sysctl -p /etc/sysctl.d/k8s.conf"

Configure IPVS

# cat /root/ipvs.sh
#!/bin/bash
yum -y install ipvsadm ipset
#### Create the ipvs modules script
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
#### Run the script and verify the modules load
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#########################
# ansible k8s -m script -a "/root/ipvs.sh"

Configure bridge forwarding rules

# cat sysctl.sh
#!/bin/bash
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
cat <<EOF | tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
sysctl --system
# ansible k8s  -m script -a "/root/sysctl.sh"

Configure the etcd cluster

Generate certificates (on the ansible host)

# curl -o /usr/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
# curl -o /usr/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
# curl -o /usr/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# chmod +x /usr/bin/cfssl*

Create the CA configuration file

# mkdir -p /root/ssl
# cd /root/ssl
# cat >ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "etcd": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

Create the CA certificate signing request

# cat >ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "jdt",
      "OU": "iot"
    }
  ]
}
EOF

Generate the CA certificate and private key

# cfssl gencert -initca ca-csr.json | cfssljson -bare ca

Create the etcd TLS certificate signing request

# cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.3.21",
    "192.168.3.22",
    "192.168.3.23",
    "192.168.3.24",
    "192.168.3.25",
    "192.168.3.26",
    "etcd1",
    "etcd2",
    "etcd3",
    "master1",
    "master2",
    "master3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "beijing",
      "L": "beijing",
      "O": "jdt",
      "OU": "iot"
    }
  ]
}
EOF

Generate the etcd certificate and key, then distribute them

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd etcd-csr.json | cfssljson -bare etcd
# ansible etcd -m copy -a "src=/root/ssl/ dest=/export/Data/certs/"
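
Before distributing, the SAN list of the generated certificate can be sanity-checked with cfssl-certinfo (downloaded above); this is just an optional check:

# cfssl-certinfo -cert etcd.pem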

Install and configure etcd

Create the data directory

# ansible etcd -m shell -a "mkdir -p /export/Data/etcd_data"

Download and distribute etcd

# wget https://github.com/etcd-io/etcd/releases/download/v3.5.1/etcd-v3.5.1-linux-amd64.tar.gz
# tar xf etcd-v3.5.1-linux-amd64.tar.gz && cd etcd-v3.5.1-linux-amd64
# ansible etcd -m copy -a "src=etcd dest=/usr/bin/"
# ansible etcd -m copy -a "src=etcdutl dest=/usr/bin/"
# ansible etcd -m copy -a "src=etcdctl dest=/usr/bin/"
# ansible etcd -m shell -a "chmod +x /usr/bin/etcd*"

Configure etcd

# cat etcd_config.sh
#!/bin/bash
# PEER_NAME is this node's host name / domain name.
# PRIVATE_IP is this node's IP address (used to generate the config below).
# ETCD_CLUSTER is the member list of the whole cluster (format: name=https://ip:port; names are arbitrary but should be distinctive).
# ETCD_INITIAL_CLUSTER_TOKEN is the etcd cluster token and must be identical on every member.
interface_name=`cat /proc/net/dev | sed -n '3,$p' | awk -F ':' {'print $1'} | grep -E "^ " | grep -v lo | head -n1`
ipaddr=`ip a | grep $interface_name | awk '{print $2}' | awk -F"/" '{print $1}' | awk -F':' '{print $NF}'`
export PEER_NAME=`hostname`
export PRIVATE_IP=`echo $ipaddr | tr -d '\r'`
export ETCD_CLUSTER="etcd1=https://192.168.3.21:2380,etcd2=https://192.168.3.22:2380,etcd3=https://192.168.3.23:2380"
export ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-1"
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
Conflicts=etcd.service

[Service]
Type=notify
Restart=always
RestartSec=5s
LimitNOFILE=65536
TimeoutStartSec=0
ExecStart=/usr/bin/etcd --name ${PEER_NAME} \
  --data-dir /export/Data/etcd_data \
  --listen-client-urls https://${PRIVATE_IP}:2379 \
  --advertise-client-urls https://${PRIVATE_IP}:2379 \
  --listen-peer-urls https://${PRIVATE_IP}:2380 \
  --initial-advertise-peer-urls https://${PRIVATE_IP}:2380 \
  --cert-file=/export/Data/certs/etcd.pem \
  --key-file=/export/Data/certs/etcd-key.pem \
  --client-cert-auth \
  --trusted-ca-file=/export/Data/certs/ca.pem \
  --peer-cert-file=/export/Data/certs/etcd.pem \
  --peer-key-file=/export/Data/certs/etcd-key.pem \
  --peer-client-cert-auth \
  --peer-trusted-ca-file=/export/Data/certs/ca.pem \
  --initial-cluster ${ETCD_CLUSTER} \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new

[Install]
WantedBy=multi-user.target
EOF
# ansible etcd   -m script -a "/root/etcd_config.sh"

Start etcd

# ansible etcd -m shell -a "systemctl daemon-reload"
# ansible etcd -m service -a 'name=etcd state=started'
# ansible etcd -m shell -a "systemctl enable etcd"

Verify etcd

Note: run this on the ansible node; etcdctl must be installed there.

# cat check_etcd.sh
#!/bin/bash
HOST1=192.168.3.21
HOST2=192.168.3.22
HOST3=192.168.3.23
ENDPOINTS=$HOST1:2379,$HOST2:2379,$HOST3:2379
# Client certificate auth is enabled, so every command needs the certificates.
KEY="--cacert=/root/ssl/ca.pem \
--cert=/root/ssl/etcd.pem \
--key=/root/ssl/etcd-key.pem"
# Cluster health
etcdctl --endpoints=$ENDPOINTS $KEY endpoint health
# Cluster status
etcdctl --endpoints=$ENDPOINTS $KEY --write-out=table endpoint status
# Cluster members
etcdctl --endpoints=$ENDPOINTS $KEY member list -w table
# sh check_etcd.sh
192.168.3.22:2379 is healthy: successfully committed proposal: took = 6.670434ms
192.168.3.23:2379 is healthy: successfully committed proposal: took = 7.021894ms
192.168.3.21:2379 is healthy: successfully committed proposal: took = 6.938656ms
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.3.21:2379 | a30c90f91c6bc0bf | 3.5.1 | 20 kB | false | false | 2 | 23 | 23 | |
| 192.168.3.22:2379 | 877407b6419f0fed | 3.5.1 | 20 kB | true | false | 2 | 23 | 23 | |
| 192.168.3.23:2379 | 75b3a36457698e9a | 3.5.1 | 37 kB | false | false | 2 | 23 | 23 | |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+------------------+---------+-------+---------------------------+---------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+---------------------------+---------------------------+------------+
| 75b3a36457698e9a | started | etcd3 | https://192.168.3.23:2380 | https://192.168.3.23:2379 | false |
| 877407b6419f0fed | started | etcd2 | https://192.168.3.22:2380 | https://192.168.3.22:2379 | false |
| a30c90f91c6bc0bf | started | etcd1 | https://192.168.3.21:2380 | https://192.168.3.21:2379 | false |
+------------------+---------+-------+---------------------------+---------------------------+------------+

Install and configure CRI-O

Install CRI-O

# cat get_cri-o.sh
#!/bin/bash
VERSION=1.22
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:${VERSION}/CentOS_7/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo
# ansible k8s -m  yum -a "name=cri-o,cri-tools state=latest"
# ansible k8s -m shell -a "sudo systemctl enable --now crio"

Change the CRI-O storage path

# ansible k8s -m shell -a "sed -i -e  's?^graphroot =.*?graphroot = "/export/Data/containers/storage"?g' /etc/containers/storage.conf"

Configure the cgroup driver

# cat 02-cgroup-manager.conf
[crio.runtime]
conmon_cgroup = "pod"
cgroup_manager = "systemd"
# ansible k8s -m copy -a "src=02-cgroup-manager.conf dest=/etc/crio/crio.conf.d/"

Configure a registry mirror

# cat images_mirr.sh
#!/bin/bash
cat >> /etc/containers/registries.conf << EOF
[[registry]]
prefix = "docker.io"
location = "hub-mirror.c.163.com" [[registry.mirror]]
prefix = "docker.io"
location = "hub-mirror.c.163.com"
EOF
# ansible k8s  -m script -a "/root/images_mirr.sh"
# ansible k8s -m service -a 'name=crio state=restarted'
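
To confirm CRI-O and the mirror work, a test pull through crictl is a quick sanity check (busybox is just an example image; if crictl cannot find the runtime endpoint, pass --runtime-endpoint unix:///var/run/crio/crio.sock):

# ansible k8s -m shell -a "crictl pull docker.io/library/busybox:latest"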

Configure the LB

On public cloud, use the provider's load balancer service instead.

A highly available LB setup will be covered in a later update; for now nginx is used as a stand-in.

Run the following on the LB node.

[root@lb ~]# yum -y install epel-release.noarch
[root@lb ~]# yum -y install nginx nginx-mod-stream

Add the following to the nginx configuration file:

stream {
    log_format main '$remote_addr [$time_local]'
                    '$protocol $status $bytes_sent $bytes_received'
                    '$session_time';
    server {
        listen 16443;
        proxy_pass kubeapi;
        access_log /var/log/nginx/access.log main;
    }
    upstream kubeapi {
        server 192.168.3.24:6443;
        server 192.168.3.25:6443;
        server 192.168.3.26:6443;
    }
}
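
Validate the configuration and start nginx on the LB node, then confirm it is listening on 16443:

[root@lb ~]# nginx -t
[root@lb ~]# systemctl enable --now nginx
[root@lb ~]# ss -lntp | grep 16443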

Deploy Kubernetes

Install kubeadm and kubelet

# cat kube.sh
#!/bin/bash
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
yum install -y kubelet-1.22.3-0 kubeadm-1.22.3-0 kubectl-1.22.3-0 --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
# ansible k8s -m script -a "/root/kube.sh"

Distribute the etcd certificates

# ansible k8s -m shell -a "mkdir -p /export/Data/certs/"

Configure kubelet

# cat  kubelet_conf.sh
#!/bin/bash
cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS=--container-runtime=remote --cgroup-driver=systemd --container-runtime-endpoint='unix:///var/run/crio/crio.sock' --runtime-request-timeout=5m
EOF
# ansible k8s -m script -a "/root/kubelet_conf.sh"
# ansible k8s -m service -a 'name=kubelet state=restarted'

Initialize the first master node

# cat kubeadm_config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.22.3
imageRepository: registry.aliyuncs.com/google_containers
controlPlaneEndpoint: "192.168.3.29:16443"
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "172.16.0.0/16"
  dnsDomain: "cluster.local"
dns:
  type: "CoreDNS"
etcd:
  external:
    endpoints:
      - https://192.168.3.21:2379
      - https://192.168.3.22:2379
      - https://192.168.3.23:2379
    caFile: /export/Data/certs/ca.pem
    certFile: /export/Data/certs/etcd.pem
    keyFile: /export/Data/certs/etcd-key.pem
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# ansible 192.168.3.24 -m copy -a "src=kubeadm_config.yaml dest=/root"
# ansible k8s -m copy -a "src=/root/ssl/ dest=/export/Data/certs/"
# ansible 192.168.3.24 -m shell -a "kubeadm init --config=/root/kubeadm_config.yaml --upload-certs"
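
kubeadm init prints the join command used in the following steps. If the token or certificate key has expired by the time the other nodes join (tokens last 24 hours, the uploaded certs 2 hours), new ones can be generated on the first master:

# ansible 192.168.3.24 -m shell -a "kubeadm token create --print-join-command"
# ansible 192.168.3.24 -m shell -a "kubeadm init phase upload-certs --upload-certs"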

Initialize the second master node

Note: the token and certificate key come from the output of the previous step.

# ansible 192.168.3.25 -m shell  -a "kubeadm join 192.168.3.29:16443 --token de4x51.d923b7l0tbi0692t --discovery-token-ca-cert-hash sha256:b1a8f00caed912ac083d10d8ecd1e92ddf6870c768f91d4e43c91c2614e24e1a --control-plane --certificate-key 0b34ca2ebd85f99ff66b2f57b80708e2ac0368880da52a802e3feb01852f2d81"

Initialize the third master node

# ansible 192.168.3.26 -m shell  -a "kubeadm join 192.168.3.29:16443 --token de4x51.d923b7l0tbi0692t --discovery-token-ca-cert-hash sha256:b1a8f00caed912ac083d10d8ecd1e92ddf6870c768f91d4e43c91c2614e24e1a --control-plane --certificate-key 0b34ca2ebd85f99ff66b2f57b80708e2ac0368880da52a802e3feb01852f2d81"

Initialize the worker nodes

# ansible  k8s-worker  -m shell -a " kubeadm join 192.168.3.29:16443 --token de4x51.d923b7l0tbi0692t --discovery-token-ca-cert-hash sha256:b1a8f00caed912ac083d10d8ecd1e92ddf6870c768f91d4e43c91c2614e24e1a"

Set up kubectl on the deploy host

# mkdir -p $HOME/.kube
# scp root@192.168.3.24:/etc/kubernetes/admin.conf $HOME/.kube/config
# scp root@192.168.3.24:/usr/bin/kubectl /usr/bin/kubectl

Verify the nodes

[root@deploy ~]# kubectl  get node
NAME STATUS ROLES AGE VERSION
master1 Ready control-plane,master 41m v1.22.3
master2 Ready control-plane,master 13m v1.22.3
master3 Ready control-plane,master 12m v1.22.3
worker1 Ready <none> 9m18s v1.22.3
worker2 Ready <none> 9m19s v1.22.3

Deploy the pod network

Modify the configuration
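
Relative to the stock calico-etcd.yaml, the fields customized below include the etcd Secret data, etcd_endpoints, the etcd_ca/etcd_cert/etcd_key paths, the VXLAN backend settings, CALICO_IPV4POOL_CIDR (matching the podSubnet above), and the image registry. The base64 Secret values can be produced from the certificates generated earlier, for example:

# base64 -w 0 /root/ssl/etcd-key.pem
# base64 -w 0 /root/ssl/etcd.pem
# base64 -w 0 /root/ssl/ca.pem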

---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: calico-etcd-secrets
namespace: kube-system
data:
# Populate the following with etcd TLS configuration if desired, but leave blank if
# not using TLS for etcd.
# The keys below should be uncommented and the values populated with the base64
# encoded contents of each file that would be associated with the TLS data.
# Example command for encoding a file contents: cat <file> | base64 -w 0
etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBNkZJNE91OUl4WU01L2IxUVBHa2RWQUF0UHc1QzA1b0MwN1Z0WGhWaWZDUzYwWitsCjV3aFRBRGRITVpvajhzdXB3dGU2a3IyUnpOYjluM2xGb2lqb0MxMjV4LzUvd2xZcWYyOWt1WTJrQzd4RlBwUFgKdHhwUmlUMEVFV09vTGVyQ202Y3RmMVhjR1RxTHBsZXorVTBCSGNRY0FoUnc3NFFHK1ByU3pDbzV5UDhGWTFHSQo0UWhHWkxyWnZXT0JJZGcyWjFGOHpvSVdVVE1QdWxrQ0Z6WHNDeENFVXY1TXFybjZsN0RZcWJ5K2drZG5qb2U3CklhQXp0UW5CZFVOMmEvdHdXSVE4S3YvSU15TTduT0plL29POG5rdDY4d2h6V1Ftc3p2VkdpSDI1SWhwUUxub3UKcmp1TEhBRERCaS9RM1JuWXdVZWZYTGVOZ1FoTmJYZXE0eGVuT1FJREFRQUJBb0lCQVFETVNjaHlZb211VFFlSQpqWmxwbGRFWlZaSnorVGxnVXZTYmI5VTlQemE4RFp4TnlzSWJGMkhOTmM2ZjJuZ3ovMDFITFdZOXRQN3BqaisxCnBQRkxlQWNjUDQyblJLN1psK1dFNjlJNXJFaU5uVCtTbUhTKzZTQzd1bkRDVGN6TW03d0hIWW5QaUJPa0I2eFgKV0pYRTZpYktJdkd5RG9HRXpLZEk5MTYzODRXZXJKejJneEhDMEZzUGcxOEJPV0NjMmM3SkN5UmdhOXl6Sk90UQpISWw5N0svNStNRXZpSFI0emhqV3hiTXNyVW55MllMQ1hPeW9reTBzVzN0U2RLbmZBUUs2RkJueWxFN3VVQnNXCm1XK2o3Kzl6ZUJGUVdRMlBGS1J4Q1o4amtYSSt1a1NhVzBoTER6b0d6OHBweno5UWNHMWVCOURqTFE5UHFCZDYKbTFkcnovU0JBb0dCQVBqY2x3TlEvOVFFUWhMc1Q3Nkw1OFpZQnBkQ2RUZTFMbG1vVWVIeDQveStJYlYwSkQ2eQpvRmpFbGEzZm1UOHA3THJGMXFQZjlNWmsrUmtRa3VjOGcyQUlyUXp0cmtrUnQxWmVSWHBhMHIzMU53emlEdTU3ClpLNnVWamM4S0NKYlV6S3NNcGZUajdRemFLelZNSFI3aWgxdzZOUWYxS0JNdG94Q3Rtc2MrdjBwQW9HQkFPNzgKSytxd3BJbWpCd2dXZm01NitSKzFzWXkrclZOMk1kZU9QdmRFTzRyQzlHSGlWT1VaaEM0WXlNRnJNOEh4TFd5RQo0eVpsMVpvNXZYVG5ha0JFUWxHU2E4L1NnMkZkY0tJdnNyTG9RUnBMTEhWMHo4b1E0Q2JWTW1GZkgwMDBMK1dICnBYYXNYaWlBdkcrdmxxOGk2Z1U2OGRnOFc2akFUeTZTTUNQTDBZdVJBb0dBU2h4L1NIaU54MWtCU0Z0aG9EQlAKOU14d0lnbWptTlIzR2pJN09GdHQ5dTIwWWpKVlBPcTdQOVJEY3dWY3dPZStYUnpmdit2SkhIQWprcWhSNTFVcApGcWRleWJQYXJGMy9TRlJJd3BoYm5FQnpoWDJvenJLbW1ETEk4Q2dWRjY0MHg2bHFZN2FZWENUWExtbEt4ZFdvCm12M3VDSVgyTDBySkxsb0xzemh0TW9rQ2dZQjJOdUw2YW5wWll2MDljUE1GYjJyLzFuNkhJbUxXWUNiemUzZUcKRklobmNWdzFkeUdMV2YzYVY0UW11UUtYTXRmSFVFeVVWOWM3UE1pTXBWUVhpaXhMOFdQSEgxakJ0dGphUVVIaAo0YVVpZm9EMWNOekFGV3pyaUpZdE9FSmhqQ2tOSHZZb0o4ZER2YnA0ZktESzdUaFpjZmpqZjZmUFo2RkRaaWpOCjdDb3hJUUtCZ1FDWTRWTDM5cG5KZVFQMFhYc1dHVnhVN2Z5Szh0YVFJVnk4Y0tXeVBGdXNaYklXSVM0eU5ENW8KWFI5cHZGYjdkbmQzMnJXaFNKeWZJVm9ZQWhMTXpyd2dBdnF5Q1J4MXdEU2NqdnRFbCs5ZUF1SWIrUFNYZEQ2NgpRMnFyWEttMjNlem0yVkpUL2MxWHlOb2FDYVExb1BPK1BBTDMxZkxiUklLdUZrUEMzVTRZSFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVLRENDQXhDZ0F3SUJBZ0lVWkRmczR0UGR0dVJGZEZGRHM0MHBRcWp0VUNjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1hERUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjJKbGFXcHBibWN4RURBT0JnTlZCQWNUQjJKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEycGtkREVNTUFvR0ExVUVDeE1EYVc5ME1RMHdDd1lEVlFRREV3UmxkR05rCk1DQVhEVEl4TVRFeE56RXhOVGd3TUZvWUR6SXhNakV4TURJME1URTFPREF3V2pCY01Rc3dDUVlEVlFRR0V3SkQKVGpFUU1BNEdBMVVFQ0JNSFltVnBhbWx1WnpFUU1BNEdBMVVFQnhNSFltVnBhbWx1WnpFTU1Bb0dBMVVFQ2hNRAphbVIwTVF3d0NnWURWUVFMRXdOcGIzUXhEVEFMQmdOVkJBTVRCR1YwWTJRd2dnRWlNQTBHQ1NxR1NJYjNEUUVCCkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEb1VqZzY3MGpGZ3puOXZWQThhUjFVQUMwL0RrTFRtZ0xUdFcxZUZXSjgKSkxyUm42WG5DRk1BTjBjeG1pUHl5Nm5DMTdxU3ZaSE0xdjJmZVVXaUtPZ0xYYm5IL24vQ1ZpcC9iMlM1amFRTAp2RVUrazllM0dsR0pQUVFSWTZndDZzS2JweTEvVmR3Wk9vdW1WN1A1VFFFZHhCd0NGSER2aEFiNCt0TE1Lam5JCi93VmpVWWpoQ0Vaa3V0bTlZNEVoMkRablVYek9naFpSTXcrNldRSVhOZXdMRUlSUy9reXF1ZnFYc05pcHZMNkMKUjJlT2g3c2hvRE8xQ2NGMVEzWnIrM0JZaER3cS84Z3pJenVjNGw3K2c3eWVTM3J6Q0hOWkNhek85VWFJZmJraQpHbEF1ZWk2dU80c2NBTU1HTDlEZEdkakJSNTljdDQyQkNFMXRkNnJqRjZjNUFnTUJBQUdqZ2Q4d2dkd3dEZ1lEClZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlYKSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJUSGNlT1BOZm50WnUxR3hZMEtzcmttQmsyQVpUQWZCZ05WSFNNRQpHREFXZ0JUS0JxL29EV3p0TE5HSDNzcnVvY0IrckI1akp6QmRCZ05WSFJFRVZqQlVnZ1ZsZEdOa01ZSUZaWFJqClpES0NCV1YwWTJRemdnZHRZWE4wWlhJeGdnZHRZWE4wWlhJeWdnZHRZWE4wWlhJemh3VEFxQU1WaHdUQXFBTVcKaHdUQXFBTVhod1RBcUFNWWh3VEFxQU1YaHdUQXFBTWFNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUEvSU9NbgpIMkZYWmVqYU1DNHhlTjdVRmVoaTNGQndjbGNXcUtLU3J2VHhYT1RsMjZOVzhRd2h1SGc3RHNrQkN3UEhXL0s3ClJqdllRNHlEbVB0Q0JHbDE0K3hnMmxYcnhuY0Zzd1N0dFoxcDV1UjNWVFFlNlFDS3ZsNGMyWXNHQzZEU3d2dE4KK041SVFkVVhvalhJTVhkWXVzZS90Qk42b2xjMkdvVFJQV0lCU2FHODhBejd4em5VNThiZXZzN28vU1ZtS2pxZgpTVVA2U3FZeHlPaUtDNWs5cC9qOU42MnN0ZmJURmRxN1JYQ2p0OVl6Q3QwNWg4QW1wLzNmdStYZkhCQTRjYjN1ClJUNTdjZVlXdkIzSEtMMFFFNWNOUjRLNWlXa01LUi94YnNzZlNxSWFPTVF6Q29sWjF3dFZPendaNGZsZUkrVUUKYTFpQUF4K1IxNkNCeG4xZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURyRENDQXBTZ0F3SUJBZ0lVTGpLRjE2cDVteXhiWkZFRWNUMi9sSDhGTVdrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1hERUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjJKbGFXcHBibWN4RURBT0JnTlZCQWNUQjJKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEycGtkREVNTUFvR0ExVUVDeE1EYVc5ME1RMHdDd1lEVlFRREV3UmxkR05rCk1CNFhEVEl4TVRFeE56RXhOVEl3TUZvWERUSTJNVEV4TmpFeE5USXdNRm93WERFTE1Ba0dBMVVFQmhNQ1EwNHgKRURBT0JnTlZCQWdUQjJKbGFXcHBibWN4RURBT0JnTlZCQWNUQjJKbGFXcHBibWN4RERBS0JnTlZCQW9UQTJwawpkREVNTUFvR0ExVUVDeE1EYVc5ME1RMHdDd1lEVlFRREV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBUThBTUlJQkNnS0NBUUVBeXE2Zm4ycWtlVzJOS3RqREJiMmg5Q1lzZWpBWnIwTmlOWGRPZy8rL3FRNk8KbUNMR1pLU3picUlVY05NRUQvSk5tbWF4UGlPUVNLYmFBUlZscWFKS1ZMVXUxMmo2S1NPUi9KR1hOcnF5ZjM4RQpvREE0R29jdHdtWkI3Rzdrdk1PdXFRWXRMKzQxR0hJYmZsaHFXci9zQ2hEM1E3VlJyaWVMYW9CVFpFMFpEUWVDCis5KzNLcDNqYmEyeWZUNU85K3F4WlFya0xBeE5GR21KUVBzT2ZhTnJjU1p6YTVBc0sybE9MNXAveExGRm0yQU8KanVVSE8yMnBKL0NjMDBPanNveUFnVE5jdVJmaDNuNjdXbllyYVV1RXVhU0RheEZBWlk4bGlhdXFHbndlV296VAp0Q3pVcEUrbW9RTzVqL0o0UksrNm94OTJlZWpLQm1hY3VTM1JNeTFPQVFJREFRQUJvMll3WkRBT0JnTlZIUThCCkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWRCZ05WSFE0RUZnUVV5Z2F2NkExczdTelIKaDk3SzdxSEFmcXdlWXljd0h3WURWUjBqQkJnd0ZvQVV5Z2F2NkExczdTelJoOTdLN3FIQWZxd2VZeWN3RFFZSgpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEQ1huN3h0VnFjT2ptTUcvUFpHYWp5UEdVTlFsKzNwaFhnYm1vZmtQMFNoCkJlVTZXbTNkOHB4WThrR0xTT2gxSzl1MlA3RnUyTzY3M21pMkdSOEJPT3NLWS9UK0p0RmlmenlmY3VGMjQ4L3QKcFFjRkNUOGw1Wjc1bXcySTRORXZJU1pnU3piWVN0SXdPU01FcFBJZWRRNzY2QUxtVGVwZXNuVll3K2U2dGlBUgpmS0lqakJEeGcrd1B4b2tseWNxWXRCQVA5dDRKMVk4Vkt6VENNelorcllweWRNNWlIaElFK3VYTlhqSXJTMURMCmxNN3JSS2crblRnRk9ma2dPQ3FaTmlZMTFzdzNWQnJUOW4rM1BhTzB2ekZqbXcrUVNxTVpTclJUS1d1V2xaSkcKT2JiUXUvcGhKdk9hZ2NBbUVXRGorNUdaZTVwS29neXlIdUVSVW0wSzYwST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "https://192.168.3.21:2379,https://192.168.3.22:2379,https://192.168.3.23:2379"
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca" # "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key" # "/calico-secrets/etcd-key"
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "vxlan"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
veth_mtu: "0"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"log_file_path": "/var/log/calico/cni/cni.log",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Pods are monitored for changing labels.
# The node controller monitors Kubernetes nodes.
# Namespace and serviceaccount labels are used for policy.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
- serviceaccounts
verbs:
- watch
- list
- get
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.mirrors.ustc.edu.cn/calico/cni:v3.21.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /calico-secrets
name: etcd-certs
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.mirrors.ustc.edu.cn/calico/pod2daemon-flexvol:v3.21.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.mirrors.ustc.edu.cn/calico/node:v3.21.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
name: kubernetes-services-endpoint
optional: true
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the VXLAN tunnel device.
- name: FELIX_VXLANMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Set MTU for the Wireguard tunnel device.
- name: FELIX_WIREGUARDMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "172.16.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
lifecycle:
preStop:
exec:
command:
- /bin/calico-node
- -shutdown
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
#- -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
#- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- mountPath: /calico-secrets
name: etcd-certs
- name: policysync
mountPath: /var/run/nodeagent
# For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
# parent directory.
- name: sysfs
mountPath: /sys/fs/
# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
# If the host is known to mount that filesystem already then Bidirectional can be omitted.
mountPropagation: Bidirectional
- name: cni-log-dir
mountPath: /var/log/calico/cni
readOnly: true
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: sysfs
hostPath:
path: /sys/fs/
type: DirectoryOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to access CNI logs.
- name: cni-log-dir
hostPath:
path: /var/log/calico/cni
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0400
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-kube-controllers
image: docker.mirrors.ustc.edu.cn/calico/kube-controllers:v3.21.0
env:
# The location of the etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: policy,namespace,serviceaccount,workloadendpoint,node
volumeMounts:
# Mount in the etcd TLS secrets.
- mountPath: /calico-secrets
name: etcd-certs
livenessProbe:
exec:
command:
- /usr/bin/check-status
- -l
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
periodSeconds: 10
volumes:
# Mount in the etcd TLS secrets with mode 400.
# See https://kubernetes.io/docs/concepts/configuration/secret/
- name: etcd-certs
secret:
secretName: calico-etcd-secrets
defaultMode: 0440
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml
---
# Source: calico/templates/kdd-crds.yaml

Install Calico

# kubectl  apply -f calico-etcd.yaml

Verify the cluster

[root@deploy ~]# kubectl  get  pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-9767fc4b9-tk9fb 1/1 Running 0 6m56s
calico-node-5mc9h 1/1 Running 0 6m56s
calico-node-dswmp 1/1 Running 0 6m56s
calico-node-qht2s 1/1 Running 0 6m56s
calico-node-sdrcg 1/1 Running 0 6m56s
calico-node-x58lj 1/1 Running 0 6m56s
coredns-7f6cbbb7b8-fc8rd 1/1 Running 0 61m
coredns-7f6cbbb7b8-qvw2m 1/1 Running 0 61m
kube-apiserver-master1 1/1 Running 2 94m
kube-apiserver-master2 1/1 Running 0 66m
kube-apiserver-master3 1/1 Running 0 64m
kube-controller-manager-master1 1/1 Running 2 94m
kube-controller-manager-master2 1/1 Running 0 66m
kube-controller-manager-master3 1/1 Running 0 64m
kube-proxy-bscfn 1/1 Running 0 62m
kube-proxy-f2fpb 1/1 Running 0 64m
kube-proxy-kt7nl 1/1 Running 0 66m
kube-proxy-lzww8 1/1 Running 0 62m
kube-proxy-zn6gj 1/1 Running 2 94m
kube-scheduler-master1 1/1 Running 2 94m
kube-scheduler-master2 1/1 Running 0 66m
kube-scheduler-master3 1/1 Running 0 64m

Problems and solutions

1. kubelet keeps logging: failed to get cgroup stats for "/system.slice/kubelet.service"

11月 18 09:00:42 master1 kubelet[2424]: E1118 09:00:42.948672    2424 summary_sys_containers.go:47] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": unknown container \"/system.slice/kubelet.service\"" containerName="/system.slice/kubelet.service"
11月 18 09:00:52 master1 kubelet[2424]: E1118 09:00:52.956142 2424 summary_sys_containers.go:47] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": unknown container \"/system.slice/kubelet.service\"" containerName="/system.slice/kubelet.service"
11月 18 09:01:02 master1 kubelet[2424]: E1118 09:01:02.961022 2424 summary_sys_containers.go:47] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": unknown container \"/system.slice/kubelet.service\"" containerName="/system.slice/kubelet.service"
11月 18 09:01:12 master1 kubelet[2424]: E1118 09:01:12.966033 2424 summary_sys_containers.go:47] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": unknown container \"/system.slice/kubelet.service\"" containerName="/system.slice/kubelet.service"
11月 18 09:01:22 master1 kubelet[2424]: E1118 09:01:22.970644 2424 summary_sys_containers.go:47] "Failed to get system container stats" err="failed to get cgroup stats for \"/system.slice/kubelet.service\": failed to get container info for \"/system.slice/kubelet.service\": unknown container \"/system.slice/kubelet.service\"" containerName="/system.slice/kubelet.service"

Solution

Add CPUAccounting=true and MemoryAccounting=true to the kubelet systemd drop-in:

[root@master2 ~]# cat /lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
CPUAccounting=true
MemoryAccounting=true
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
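
After editing the drop-in, reload systemd and restart kubelet on each affected node for the change to take effect:

[root@master2 ~]# systemctl daemon-reload
[root@master2 ~]# systemctl restart kubelet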

2. kubectl get cs reports: dial tcp 127.0.0.1:10251: connect: connection refused

[root@deploy ~]# kubectl  get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
etcd-1 Healthy {"health":"true","reason":""}
controller-manager Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}

Solution

Comment out --port=0 in the kube-scheduler manifest:

[root@master1 ~]# cat /etc/kubernetes/manifests/kube-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    # - --port=0
    image: registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.3
    imagePullPolicy: IfNotPresent
[root@deploy ~]# kubectl  get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}

Author: 宗庄凯
