# HA all-binaries deployment script -- only the docker startup step was unsuccessful
# [root@test1 script]# cat k8s-docker-binary-py
#!/usr/bin/python
# -*- coding: utf-8 -*- from __future__ import print_function
import os, sys, stat
import shutil
import tarfile
import subprocess # 定义环境变量 # 定义主机名
NODE_NAME = subprocess.check_output(["hostname"], shell=True)
NODE_NAME = str(NODE_NAME.decode('utf8').strip()).strip('b') # 定义主机ip
NODE_IP = subprocess.check_output(["hostname -i | awk '{print $NF}'"], shell=True)
NODE_IP = str(NODE_IP.decode('utf8').strip()).strip('b') #定义key
ENCRYPTION_KEY = subprocess.check_output(["head -c 32 /dev/urandom | base64"], shell=True)
ENCRYPTION_KEY = str(ENCRYPTION_KEY.decode('utf8').strip()).strip('b') SERVICE_CIDR = "10.254.0.0/16"
CLUSTER_CIDR = "172.30.0.0/16"
NODE_PORT_RANGE = "8000-30000"
NODE_IPS = ['192.168.0.91', '192.168.0.92', '192.168.0.93']
NODE_NAMES = ['test1', 'test2', 'test3','test4']
MASTER_VIP = "192.168.0.235"
KUBE_APISERVER = "https://192.168.0.235:8443"
VIP_IF = "ens33"
ETCD_ENDPOINTS = "https://192.168.0.91:2379,https://192.168.0.92:2379,https://192.168.0.93:2379"
ETCD_NODES = "test1=https://192.168.0.91:2380,test2=https://192.168.0.92:2380,test3=https://192.168.0.93:2380"
FLANNEL_ETCD_PREFIX = "/kubernetes/network/"
CLUSTER_KUBERNETES_SVC_IP = "10.254.0.1"
CLUSTER_DNS_SVC_IP = "10.254.0.2"
CLUSTER_DNS_DOMAIN = "cluster.local."
IFACE="ens33" def create_dir_kernel_parameters():
print("创建目录、配置内核参数、安装依赖包")
#所有节点创建目录
subprocess.call(["time ansible k8s -m file -a 'path=/k8s/profile/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m file -a 'path=/server/software/k8s/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m file -a 'path=/root/ssl/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m file -a 'path=/script/ state=directory mode=0777'"], shell=True)
#配置内核参数
subprocess.call(["ansible k8s -m shell -a 'iptables -P FORWARD ACCEPT'"], shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/k8s/profile/k8s.conf dest=/etc/sysctl.d/k8s.conf force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'sysctl --system'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'modprobe ip_vs'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'modprobe ip_vs_rr'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'modprobe ip_vs_wrr'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'modprobe ip_vs_sh'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'modprobe nf_conntrack_ipv4'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'lsmod | grep ip_vs'"], shell=True)
#安装依赖包, 其实这一步只是配置node节点的kube-proxy步骤需要用到
subprocess.call(["ansible k8s -m shell -a 'yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp'"], shell=True) def install_cfssl():
print("安装 cfssl 工具集")
subprocess.call(["ansible k8s -m file -a 'path=/server/software/k8s/ state=directory mode=0777'"], shell=True)
os.chdir('/server/software/k8s/')
subprocess.call(["ansible test1 -m copy -a 'src=/server/software/k8s/cfssl-certinfo_linux-amd64 dest=/usr/local/bin/cfssl-certinfo force=yes'"], shell=True)
subprocess.call(["ansible test1 -m copy -a 'src=/server/software/k8s/cfssl_linux-amd64 dest=/usr/local/bin/cfssl'"], shell=True)
subprocess.call(["ansible test1 -m copy -a 'src=/server/software/k8s/cfssljson_linux-amd64 dest=/usr/local/bin/cfssljson'"], shell=True)
os.chdir('/usr/local/bin/')
os.chmod("cfssl-certinfo", stat.S_IXOTH)
os.chmod("cfssl", stat.S_IXOTH)
os.chmod("cfssljson", stat.S_IXOTH)
print("successful") def create_root_ca():
print("创建CA根证书")
os.chdir('/root/ssl/')
subprocess.call(["time ansible k8s -m file -a 'path=/root/ssl/ state=directory mode=0777'"], shell=True)
subprocess.call(["cfssl gencert -initca ca-csr.json | cfssljson -bare ca"], shell=True)
#创建用户
subprocess.call(["time ansible k8s -m shell -a 'useradd k8s'"],shell=True)
#创建目录
subprocess.call(["time ansible k8s -m shell -a 'mkdir -p /etc/kubernetes/cert/ && chown -R k8s /etc/kubernetes'"],shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/root/ssl/ dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
print("successful") def distribute_kubectl():
print("分发 所有 二进制文件、安装kubectl ")
os.chdir('/server/software/k8s/')
shutil.unpack_archive('kubernetes-server-linux-amd64.tar.gz')
subprocess.call(["time ansible k8s -m file -a 'path=/opt/k8s/bin/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/server/software/k8s/kubernetes/server/bin/ dest=/opt/k8s/bin/ force=yes'"],shell=True)
subprocess.call(["chmod +x /opt/k8s/bin/*"], shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/opt/k8s/bin/kubectl dest=/usr/local/bin/ force=yes'"], shell=True)
subprocess.call(["time ansible k8s -m shell -a 'chmod +x /usr/local/bin/kubectl'"], shell=True)
print("successful") def create_admin_ca():
print("创建 admin 证书和私钥 ")
os.chdir('/root/ssl/')
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/cert/*'"], shell=True)
subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin"],shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/admin.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/admin-key.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
#给证书授权
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/cert/*'"], shell=True)
print("successful") def create_admin_kubeconfig():
print("创建admin kubeconfig 文件")
os.chdir('/root/ssl/')
subprocess.call(["kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/cert/ca.pem --embed-certs=true --server=https://192.168.0.235:8443 --kubeconfig=admin.conf"],shell=True)
subprocess.call(["kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=admin.conf"],shell=True)
subprocess.call(["kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=admin.conf"],shell=True)
subprocess.call(["kubectl config use-context kubernetes --kubeconfig=admin.conf"],shell=True)
subprocess.call(["ansible k8s -m file -a 'path=/root/.kube/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/admin.conf dest=/root/.kube/config force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /root/.kube/*'"], shell=True)
print("创建admin kubeconfig 文件完成") def install_etcd_cluster():
print("部署 etcd 集群")
# 解压和分发 etcd 二进制文件
os.chdir('/server/software/k8s/')
shutil.unpack_archive('etcd-v3.3.9-linux-amd64.tar.gz')
subprocess.call(["time ansible k8s -m copy -a 'src=/server/software/k8s/etcd-v3.3.9-linux-amd64/ dest=/opt/k8s/bin/ force=yes'"],shell=True)
subprocess.call(["time ansible k8s -m shell -a 'chmod +x /opt/k8s/bin/*'"],shell=True)
#创建etcd连接,直接使用etcdctl命令操作
subprocess.call(["time ansible k8s -m shell -a 'ln -s /opt/k8s/bin/etcdctl /usr/bin/etcdctl'"],shell=True)
# 创建 etcd 证书和私钥
os.chdir('/root/ssl/')
subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd"],shell=True)
subprocess.call(["time ansible k8s -m file -a 'path=/etc/etcd/cert/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/root/ssl/ dest=/etc/etcd/cert/ force=yes'"], shell=True)
# 创建 etcd.service文件
subprocess.call(["time ansible k8s -m file -a 'path=/k8s/profile/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m copy -a 'src=/k8s/profile/ dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["time ansible k8s -m shell -a 'python /k8s/profile/etcd.service.py'"],shell=True)
#创建 etcd 数据目录
subprocess.call(["time ansible k8s -m file -a 'path=/var/lib/etcd/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible k8s -m shell -a 'chmod +x /var/lib/etcd'"],shell=True)
#启动 etcd 服务
subprocess.call(["time ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["time ansible k8s -m service -a 'name=etcd state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=etcd enabled=yes'"],shell=True)
#检查启动结果
subprocess.call(["time ansible k8s -m shell -a 'systemctl status etcd'"],shell=True)
#验证服务状态
subprocess.call(["/opt/k8s/bin/etcdctl --ca-file=/etc/kubernetes/cert/ca.pem --cert-file /etc/etcd/cert/etcd.pem --key-file /etc/etcd/cert/etcd-key.pem cluster-health"],shell=True)
#说明:如果启动失败就把所有节点关机重启就即可解决 def install_haproxy():
print("安装haproxy")
#安装软件
subprocess.call(["ansible k8s -m shell -a 'yum install -y haproxy'"],shell=True)
#配置和下发 haproxy 配置文件
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/haproxy.cfg dest=/etc/haproxy force=yes'"], shell=True)
#启动haproxy
subprocess.call(["ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=haproxy state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=haproxy enabled=yes'"],shell=True)
#检查启动结果
subprocess.call(["time ansible k8s -m shell -a 'systemctl status haproxy'"],shell=True) def install_keepalived():
print("安装keepalived")
#安装软件
subprocess.call(["ansible k8s -m shell -a 'yum install -y keepalived'"],shell=True)
#配置和下发 keepalived 配置文件
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/ dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'python /k8s/profile/keepalived-master.py'"],shell=True)
subprocess.call(["ansible test0 -m shell -a 'python /k8s/profile/keepalived-back.py'"],shell=True)
#起动keepalived 服务
subprocess.call(["ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=keepalived state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=keepalived enabled=yes'"],shell=True)
#检查启动结果
subprocess.call(["time ansible k8s -m shell -a 'systemctl status keepalived'"],shell=True)
#测试是否能ping通vip
subprocess.call(["time ansible k8s -m shell -a 'ping -c 1 192.168.0.235'"],shell=True)
#查看 haproxy 状态页面 http://192.168.0.235:10080/status;账号密码:admin/123456 def kube_apiserver():
print("部署 kube-apiserver 组件")
#创建 kubernetes 证书和私钥
os.chdir('/root/ssl/')
subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes"], shell=True)
#生产service account key
subprocess.call(["ansible test1 -m shell -a 'openssl genrsa -out /root/ssl/sa.key 2048'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'openssl rsa -in /root/ssl/sa.key -pubout -out /root/ssl/sa.pub'"], shell=True)
#将生成的证书、私钥、ervice account key 分发到所有节点
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/ dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/cert/'"], shell=True)
#创建、分发加密配置文件
subprocess.call(["ansible test1 -m shell -a 'python /k8s/profile/encryption-config.py'"],shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/etc/kubernetes/encryption-config.yaml dest=/etc/kubernetes force=yes'"], shell=True)
#创建、分发kube-apiserver文件
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/kube-apiserver.service.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/kube-apiserver.service.template.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'python /k8s/profile/kube-apiserver.service.py'"],shell=True)
#起动kube-apiserver 服务
subprocess.call(["ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-apiserver state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-apiserver enabled=yes'"],shell=True)
#检查 kube-apiserver服务 运行状态
subprocess.call(["time ansible k8s -m shell -a 'systemctl status kube-apiserver'"],shell=True)
#检查集群运行状态
subprocess.call(["ansible k8s -m shell -a 'kubectl cluster-info'"],shell=True)
subprocess.call(["ansible k8s -m shell -a 'kubectl get componentstatuses'"],shell=True)
#检查 kube-apiserver 监听的端口
subprocess.call(["ansible k8s -m shell -a 'netstat -lnpt|grep kube'"],shell=True)
#授予 kubernetes 证书访问 kubelet API 的权限;在执行 kubectl exec、run、logs 等命令时,apiserver 会转发到 kubelet。这里定义 RBAC 规则,授权 apiserver 调用 kubelet API
subprocess.call(["ansible k8s -m shell -a 'kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernete'"],shell=True) def kube_controller_manager():
print("部署高可用 kube-controller-manager 集群")
#创建 kubernetes 证书和私钥
os.chdir('/root/ssl/')
subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager"], shell=True)
#将生成的证书和私钥分发到所有节点
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/ dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/cert/'"], shell=True)
#创建和分发 kubeconfig 文件
subprocess.call(["kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/cert/ca.pem --embed-certs=true --server=https://192.168.0.235:8443 --kubeconfig=kube-controller-manager.kubeconfig"], shell=True)
subprocess.call(["kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig"], shell=True)
subprocess.call(["kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig"], shell=True)
subprocess.call(["kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/kube-controller-manager.kubeconfig dest=/etc/kubernetes/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/*'"], shell=True)
#创建、分发kube-controller-manager.service
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/kube-controller-manager.service.template.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/kube-controller-manager.service.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'python /k8s/profile/kube-controller-manager.service.py'"],shell=True)
#创建日志目录
subprocess.call(["ansible k8s -m file -a 'path=/var/log/kubernetes/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /var/log/kubernetes/'"], shell=True)
#启动 kube-controller-manager 服务
subprocess.call(["ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-controller-manager state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-controller-manager enabled=yes'"],shell=True)
#检查 kube-controller-manager 运行状态
subprocess.call(["time ansible k8s -m shell -a 'systemctl status kube-controller-manager'"],shell=True)
#查看leader节点
subprocess.call(["kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml"],shell=True) def install_kube_scheduler():
print("部署高可用 kube-scheduler 集群")
#创建 kube-scheduler 证书和私钥
os.chdir('/root/ssl/')
subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler"], shell=True)
#将生成的证书和私钥分发到所有节点
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/kube-scheduler.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/kube-scheduler-key.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/cert/'"], shell=True)
#创建和分发 kubeconfig 文件
subprocess.call(["kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/cert/ca.pem --embed-certs=true --server=https://192.168.0.235:8443 --kubeconfig=kube-scheduler.kubeconfig"], shell=True)
subprocess.call(["kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig"], shell=True)
subprocess.call(["kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig"], shell=True)
subprocess.call(["kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig"], shell=True)
subprocess.call(["ansible k8s -m copy -a 'src=/root/ssl/kube-scheduler.kubeconfig dest=/etc/kubernetes/ force=yes'"], shell=True)
subprocess.call(["ansible k8s -m shell -a 'chmod +x /etc/kubernetes/*'"], shell=True)
#创建和分发kube-scheduler.service
subprocess.call(["ansible k8s -m copy -a 'src=/k8s/profile/kube-scheduler.service dest=/etc/systemd/system/ force=yes'"], shell=True)
#启动kube-scheduler 服务
subprocess.call(["ansible k8s -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-scheduler state=started'"],shell=True)
subprocess.call(["ansible k8s -m service -a 'name=kube-scheduler enabled=yes'"],shell=True)
#检查 kube-scheduler 运行状态
subprocess.call(["time ansible k8s -m shell -a 'systemctl status kube-scheduler'"],shell=True)
#查看leader节点
subprocess.call(["kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml"],shell=True) def environmental_configuration():
print("下面开始单独安装worker节点,worker节点主机名:test4")
print("配置环境")
#所有节点创建目录
subprocess.call(["time ansible test4 -m file -a 'path=/k8s/profile/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible test4 -m file -a 'path=/server/software/k8s/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible test4 -m file -a 'path=/root/ssl/ state=directory mode=0777'"], shell=True)
subprocess.call(["time ansible test4 -m file -a 'path=/script/ state=directory mode=0777'"], shell=True)
#加载ipvs内核参数,其实这一步在安装kube-proxy那一步时执行即可
subprocess.call(["ansible test4 -m shell -a 'iptables -P FORWARD ACCEPT'"], shell=True)
subprocess.call(["time ansible test4 -m copy -a 'src=/k8s/profile/k8s.conf dest=/etc/sysctl.d/k8s.conf force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'sysctl --system'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'modprobe ip_vs'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'modprobe ip_vs_rr'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'modprobe ip_vs_wrr'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'modprobe ip_vs_sh'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'modprobe nf_conntrack_ipv4'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'lsmod | grep ip_vs'"], shell=True)
#安装依赖包, 其实这一步只是配置node节点的kube-proxy步骤需要用到
subprocess.call(["ansible test4 -m shell -a 'yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp'"], shell=True)
#创建k8s群组
subprocess.call(["ansible test4 -m group -a 'name=k8s state=present'"], shell=True)
#创建k8s用户
subprocess.call(["ansible test4 -m user -a 'name=k8s group=k8s'"], shell=True) def install_docker():
print("安装docker")
#创建目录
subprocess.call(["ansible test4 -m file -a 'path=/etc/docker/ state=directory mode=0777'"], shell=True)
#安装依赖包
subprocess.call(["ansible test4 -m shell -a 'yum install -y conntrack ipvsadm ipset jq iptables curl sysstat libseccomp && /usr/sbin/modprobe ip_vs'"],shell=True)
#解压、分发docker二进制文件
subprocess.call(["time ansible test4 -m unarchive -a 'src=/server/software/k8s/docker-18.03.1-ce.tgz dest=/usr/local/'"],shell=True)
subprocess.call(["ansible test4 -m shell -a 'cp /usr/local/docker/dockerd /opt/k8s/bin/'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /opt/k8s/bin/*'"], shell=True)
#设置防火墙策略
subprocess.call(["ansible test4 -m shell -a '/usr/sbin/iptables -P FORWARD ACCEPT'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'echo /usr/sbin/iptables -P FORWARD ACCEPT > /etc/profile'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'source /etc/profile'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'systemctl stop firewalld && systemctl disable firewalld'"], shell=True)
subprocess.call(["ansible test4 -m shell -a '/usr/sbin/iptables -F && /usr/sbin/iptables -X && /usr/sbin/iptables -F -t nat'"], shell=True)
subprocess.call(["ansible test4 -m shell -a '/usr/sbin/iptables -X -t nat'"], shell=True)
#创建和分发docker.service
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/docker.service dest=/etc/systemd/system/docker.service force=yes'"], shell=True)
#配置docker加速
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/daemon.json dest=/etc/docker/daemon.json force=yes'"], shell=True)
#启动docker
subprocess.call(["ansible test4 -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=docker state=started'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=docker enabled=yes'"],shell=True)
#设置hairpin_mode
subprocess.call(["ansible test4 -m shell -a 'for intf in /sys/devices/virtual/net/docker0/brif/*; do echo 1 > $intf/hairpin_mode; done'"],shell=True)
#分发内核参数。
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kubernetes.conf dest=/etc/sysctl.d/kubernetes.conf force=yes'"], shell=True)
#加载内核参数
subprocess.call(["ansible test4 -m shell -a 'sysctl -p /etc/sysctl.d/kubernetes.conf'"],shell=True)
#检查启动结果
subprocess.call(["ansible test4 -m shell -a 'systemctl status docker'"],shell=True) def install_flanneld():
print("部署 flannel 网络")
#创建目录
subprocess.call(["ansible test4 -m file -a 'path=/k8s/profile/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m file -a 'path=/opt/k8s/bin/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m file -a 'path=/etc/flanneld/cert/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m file -a 'path=/etc/kubernetes/cert/ state=directory mode=0777'"], shell=True)
#解压、分发 flanneld 二进制文件
subprocess.call(["ansible test4 -m unarchive -a 'src=/server/software/k8s/flannel-v0.10.0-linux-amd64.tar.gz dest=/opt/k8s/bin/'"],shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /opt/k8s/bin/*'"], shell=True)
#将生成的证书和私钥分发到k8s4节点
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/flanneld.pem dest=/etc/flanneld/cert/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/flanneld-key.pem dest=/etc/flanneld/cert/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/ca.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/ca-key.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /etc/flanneld/cert/*'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /etc/kubernetes/cert/*'"], shell=True)
#向 etcd 写入集群 Pod 网段信息 注意:本步骤只需执行一次, 需要在test1节点操作
subprocess.call(["ansible test1 -m shell -a 'chmod +x /k8s/profile/flannel_to_etcd.sh'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'sh /k8s/profile/flannel_to_etcd.sh'"], shell=True)
#分发flanneld.service
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/flanneld.service.template.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/flanneld.service.py dest=/k8s/profile/flanneld.service.py force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'python /k8s/profile/flanneld.service.py'"],shell=True)
#启动flanneld
subprocess.call(["ansible test4 -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=flanneld state=started'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=flanneld enabled=yes'"],shell=True)
#检查启动结果
subprocess.call(["ansible test4 -m shell -a 'systemctl status flanneld'"],shell=True) def install_kubelet():
print("部署 kubelet 组件")
os.chdir('/root/ssl/')
#创建工作目录和日志目录
subprocess.call(["ansible test4 -m file -a 'path=/var/lib/kubelet/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m file -a 'path=/var/log/kubernetes/ state=directory mode=0777'"], shell=True)
#安装cni网络插件
subprocess.call(["ansible test4 -m file -a 'path=/opt/cni/bin/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m file -a 'path=/etc/cni/net.d/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m unarchive -a 'src=/server/software/k8s/cni-plugins-amd64-v0.7.1.tgz dest=/opt/cni/bin/'"],shell=True)
#配置admin-config
subprocess.call(["ansible test4 -m file -a 'path=/root/.kube/ state=directory mode=0777'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/admin.conf dest=/root/.kube/config force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /root/.kube/*'"], shell=True)
#配置kubectl工具
subprocess.call(["ansible test4 -m copy -a 'src=/opt/k8s/bin/kubectl dest=/usr/local/bin/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'chmod +x /usr/local/bin/kubectl'"], shell=True)
#配置kubeadm工具, 注意:在test1上操作
subprocess.call(["ansible test1 -m copy -a 'src=/opt/k8s/bin/kubeadm dest=/usr/local/bin/ force=yes'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'chmod +x /usr/local/bin/kubeadm'"], shell=True)
#配置 bootstrap RBAC 权限
subprocess.call(["kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers"],shell=True)
#创建 kubelet bootstrap kubeconfig 文件,包含创建token
subprocess.call(["ansible test1 -m shell -a 'chmod +x /k8s/profile/bootstrap-kubeconfig.sh'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'sh /k8s/profile/bootstrap-kubeconfig.sh'"], shell=True)
#分发kubelet bootstrap kubeconfig 文件到所有节点
subprocess.call(["ansible test1 -m shell -a 'chmod +x /k8s/profile/bootstrap-copy.sh'"], shell=True)
subprocess.call(["ansible test1 -m shell -a 'sh /k8s/profile/bootstrap-copy.sh'"], shell=True)
#查看 kubeadm 为各节点创建的 token;之前好好的,后来查看报错
#subprocess.call(["ansible test1 -m shell -a 'kubeadm token list --kubeconfig ~/.kube/config'"], shell=True)
#分发ca证书到worker 节点
subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/ca.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
#创建 kubelet 参数配置文件
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kubelet.config.json.template.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kubelet.config.json.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'python /k8s/profile/kubelet.config.json.py'"],shell=True)
#分发kubelet二进制文件
subprocess.call(["time ansible test4 -m copy -a 'src=/opt/k8s/bin/kubelet dest=/opt/k8s/bin/ force=yes'"], shell=True)
subprocess.call(["time ansible test4 -m shell -a 'chmod +x /opt/k8s/bin/kubelet'"], shell=True)
#创建和分发kubelet启动文件
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kubelet.service.template.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kubelet.service.py dest=/k8s/profile/ force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'python /k8s/profile/kubelet.service.py'"],shell=True)
#启动 kubelet 服务
subprocess.call(["ansible test4 -m shell -a 'systemctl daemon-reload'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=kubelet state=started'"],shell=True)
subprocess.call(["ansible test4 -m service -a 'name=kubelet enabled=yes'"],shell=True)
#检查启动结果
subprocess.call(["ansible test4 -m shell -a 'systemctl status kubelet'"],shell=True)
#创建apiserver到kubelet的权限,就是给kubernetes用户rbac授权,其实安装apiserver时候做过这一步了
#subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/apiserver-to-kubelet.yaml dest=/k8s/profile/apiserver-to-kubelet.yaml force=yes'"], shell=True)
#subprocess.call(["ansible test4 -m shell -a 'kubectl create /k8s/profile/apiserver-to-kubelet.yaml'"],shell=True) def approve_csr():
print("自动 approve CSR 请求")
subprocess.call(["time ansible test4 -m copy -a 'src=/k8s/profile/csr-crb.yaml dest=/k8s/profile/csr-crb.yaml force=yes'"], shell=True)
subprocess.call(["ansible test4 -m shell -a 'kubectl apply -f /k8s/profile/csr-crb.yaml'"],shell=True)
#等待一段时间(1-10 分钟),所有节点的 CSR 都被自动 approve;看了下时间,这次刚好等了10分钟
subprocess.call(["ansible test4 -m shell -a 'sleep 300s'"],shell=True)
#查看csr
subprocess.call(["ansible test4 -m shell -a 'kubectl get csr'"],shell=True)
#所有节点均 ready
subprocess.call(["ansible test4 -m shell -a 'kubectl get nodes'"],shell=True)
#设置集群角色
test4=subprocess.check_output(["kubectl get nodes | grep test4 | awk '{print $1}'"], shell=True)
test4=test4.decode('utf8').strip()
subprocess.call(['kubectl','label','nodes',test4,'node-role.kubernetes.io/node='])
#查看kube-controller-manager 为各 node 生成了 kubeconfig 文件和公私钥
subprocess.call(["ansible test4 -m shell -a 'ls -l /etc/kubernetes/kubelet.kubeconfig'"],shell=True)
subprocess.call(["ansible test4 -m shell -a 'ls -l /etc/kubernetes/cert/|grep kubelet'"],shell=True)
#查看kubelet 提供的 API 接口
subprocess.call(["ansible test4 -m shell -a 'yum install net-tools -y'"],shell=True)
# Tail of the kubelet deployment above: confirm kubelet is listening.
subprocess.call(["ansible test4 -m shell -a 'netstat -lnpt|grep kubelet'"], shell=True)


def install_kube_proxy():
    """Deploy the kube-proxy component on the test4 worker node.

    Steps: create the work/log directories, install dependency packages,
    load the ip_vs kernel modules (IPVS proxy mode), issue the kube-proxy
    client certificate, build and distribute the kubeconfig, push the
    config file / binary / systemd unit, then start the service and
    report its status.  All remote work is driven through ansible.
    """
    print("部署 kube-proxy 组件")
    # Create the working directory and the log directory.
    subprocess.call(["ansible test4 -m file -a 'path=/var/lib/kube-proxy/ state=directory mode=0777'"], shell=True)
    subprocess.call(["ansible test4 -m file -a 'path=/var/log/kubernetes/ state=directory mode=0777'"], shell=True)
    subprocess.call(["ansible test4 -m file -a 'path=/var/log/kubernetes/ owner=k8s group=k8s mode=0777'"], shell=True)
    # Install dependency packages.
    subprocess.call(["ansible test4 -m shell -a 'yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp'"], shell=True)
    # Kernel/network prerequisites, then load the ip_vs modules.
    subprocess.call(["ansible test4 -m shell -a 'iptables -P FORWARD ACCEPT'"], shell=True)
    subprocess.call(["time ansible test4 -m copy -a 'src=/k8s/profile/k8s.conf dest=/etc/sysctl.d/k8s.conf force=yes'"], shell=True)
    subprocess.call(["ansible test4 -m shell -a 'sysctl --system'"], shell=True)
    # Load each IPVS-related module; the list matches the original
    # one-call-per-module sequence.
    for kmod in ('ip_vs', 'ip_vs_rr', 'ip_vs_wrr', 'ip_vs_sh', 'nf_conntrack_ipv4'):
        subprocess.call(["ansible test4 -m shell -a 'modprobe %s'" % kmod], shell=True)
    subprocess.call(["ansible test4 -m shell -a 'lsmod | grep ip_vs'"], shell=True)
    # Create the kube-proxy certificate and private key (expects
    # kube-proxy-csr.json to already exist in /root/ssl/).
    os.chdir('/root/ssl/')
    subprocess.call(["cfssl gencert -ca=/etc/kubernetes/cert/ca.pem -ca-key=/etc/kubernetes/cert/ca-key.pem -config=/etc/kubernetes/cert/ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy"], shell=True)
    # Distribute the generated certificate and key to the test4 node.
    subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/kube-proxy.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
    subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/kube-proxy-key.pem dest=/etc/kubernetes/cert/ force=yes'"], shell=True)
    # FIX: certificates are data, not executables.  The original ran
    # 'chmod +x' on the cert directory (a no-op for its contents);
    # instead make the public cert world-readable and restrict the
    # private key to its owner.
    subprocess.call(["ansible test4 -m shell -a 'chmod 644 /etc/kubernetes/cert/kube-proxy.pem'"], shell=True)
    subprocess.call(["ansible test4 -m shell -a 'chmod 600 /etc/kubernetes/cert/kube-proxy-key.pem'"], shell=True)
    # Create and distribute the kubeconfig file.  Use the module-level
    # KUBE_APISERVER constant (same VIP URL) instead of a hard-coded
    # duplicate, for consistency with the rest of the script.
    subprocess.call(["kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/cert/ca.pem --embed-certs=true --server=%s --kubeconfig=kube-proxy.kubeconfig" % KUBE_APISERVER], shell=True)
    subprocess.call(["kubectl config set-credentials system:kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig"], shell=True)
    subprocess.call(["kubectl config set-context system:kube-proxy --cluster=kubernetes --user=system:kube-proxy --kubeconfig=kube-proxy.kubeconfig"], shell=True)
    subprocess.call(["kubectl config use-context system:kube-proxy --kubeconfig=kube-proxy.kubeconfig"], shell=True)
    subprocess.call(["ansible test4 -m copy -a 'src=/root/ssl/kube-proxy.kubeconfig dest=/etc/kubernetes/ force=yes'"], shell=True)
    # FIX: the kubeconfig embeds the client key, so it should be
    # owner-readable only — not executable (the original did
    # 'chmod +x /etc/kubernetes/*').
    subprocess.call(["ansible test4 -m shell -a 'chmod 600 /etc/kubernetes/kube-proxy.kubeconfig'"], shell=True)
    # Create the kube-proxy configuration file by rendering the template
    # on the remote node.
    subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kube-proxy.config.yaml.template.py dest=/k8s/profile/ force=yes'"], shell=True)
    subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kube-proxy.config.yaml.py dest=/k8s/profile/ force=yes'"], shell=True)
    subprocess.call(["ansible test4 -m shell -a 'python /k8s/profile/kube-proxy.config.yaml.py'"], shell=True)
    # Distribute the kube-proxy binary.
    subprocess.call(["time ansible test4 -m copy -a 'src=/opt/k8s/bin/kube-proxy dest=/opt/k8s/bin/ force=yes'"], shell=True)
    subprocess.call(["time ansible test4 -m shell -a 'chmod +x /opt/k8s/bin/kube-proxy'"], shell=True)
    # Create and distribute kube-proxy.service.
    subprocess.call(["ansible test4 -m copy -a 'src=/k8s/profile/kube-proxy.service dest=/etc/systemd/system/kube-proxy.service force=yes'"], shell=True)
    # Start and enable the kube-proxy service.
    subprocess.call(["ansible test4 -m shell -a 'systemctl daemon-reload'"], shell=True)
    subprocess.call(["ansible test4 -m service -a 'name=kube-proxy state=started'"], shell=True)
    subprocess.call(["ansible test4 -m service -a 'name=kube-proxy enabled=yes'"], shell=True)
    # Report kube-proxy's running status.
    subprocess.call(["time ansible test4 -m shell -a 'systemctl status kube-proxy'"], shell=True)
def install_coredns():
    """Install CoreDNS via an ansible playbook.

    The playbook (deploy_coredns.yaml) drives kubectl to create the
    CoreDNS resources and is pinned to the test4 node; after a grace
    period, the kube-system pods are listed to check the rollout.
    """
    print("安装coredns")
    subprocess.call(["ansible test4 -m shell -a 'yum install -y jq'"], shell=True)
    # Install CoreDNS through the playbook, which invokes kubectl on the
    # test4 node as defined inside deploy_coredns.yaml.
    subprocess.call(["ansible-playbook /k8s/profile/deploy_coredns.yaml"], shell=True)
    # FIX: the original waited by running 'sleep 60s' on the remote node
    # through ansible, which only served to block the controller via an
    # extra SSH session.  Wait locally instead before polling the pods.
    import time
    time.sleep(60)
    subprocess.call(["ansible test4 -m shell -a 'kubectl get pod -n kube-system'"], shell=True)
# For CoreDNS verification, see the final section of
# https://www.cnblogs.com/effortsing/p/10312081.html


def func_list():
    """Run the selected installation steps in order.

    The cluster-wide steps are commented out; only the docker
    installation for the standalone worker node is currently enabled.
    """
    # install_cfssl()
    # create_root_ca()
    # distribute_kubectl()
    # create_admin_ca()
    # create_admin_kubeconfig()
    # install_etcd_cluster()
    # install_haproxy()
    # install_keepalived()
    # kube_apiserver()
    # kube_controller_manager()
    # install_kube_scheduler()
    # From here on, the worker node is installed separately;
    # worker node address: 192.168.0.94
    # environmental_configuration()
    install_docker()
    # install_flanneld()
    # install_kubelet()
    # approve_csr()
    # install_kube_proxy()
    # install_coredns()
def main():
    """Script entry point: delegate to the step list."""
    func_list()


if __name__ == '__main__':
    main()
全部二进制脚本高可用--只有docker启动未成功的更多相关文章
- python安装二进制k8s高可用 版本1.13.0
一.所有安装包.脚本.脚本说明.下载链接:https://pan.baidu.com/s/1kHaesJJuMQ5cG-O_nvljtg 提取码:kkv6 二.脚本安装说明 1.脚本说明: 本实验为三 ...
- eureka集群高可用配置,亲测成功配置(转)
转自大神的文章:https://blog.csdn.net/tianyaleixiaowu/article/details/78184793 网上讲这个东西的很多,抄来抄去的,大部分类似,多数没讲明白 ...
- [大数据] hadoop高可用(HA)部署(未完)
一.HA部署架构 如上图所示,我们可以将其分为三个部分: 1.NN和DN组成Hadoop业务组件.浅绿色部分. 2.中间深蓝色部分,为Journal Node,其为一个集群,用于提供高可用的共享文件存 ...
- windows下docker 启动jenkins成功,浏览器无法访问,拒绝了我们的连接
[问题现象] 在Windows下使用docker启动了一个jenkins,翻越了无数的坑,最后的启动命令为 docker run --name jenkins -u root -p 8000:8000 ...
- K8s之二进制安装高可用集群
1.环境准备 #二进制部署安装文档# https://github.com/easzlab/kubeasz/blob/master/docs/setup/00-planning_and_overall ...
- Rancher安装多节点高可用(HA)
Rancher版本:Rancher v1.0.1 基本配置需求 多节点的HA配置请参照单节点需求 节点需要开放的端口 全局访问:TCP 端口22,80,443,18080(可选:用于在集群启动前 查看 ...
- heartbeat单独提供高可用服务
本文目录:1.简介2.安装heartbeat 2.1 编译安装Heartbeat3.heartbeat相关配置文件 3.1 配置文件ha.cf 3.2 配置文件authkeys 3.3 配置文件har ...
- Mysql双主互备+keeplived高可用架构介绍
一.Mysql双主互备+keeplived高可用架构介绍 Mysql主从复制架构可以在很大程度保证Mysql的高可用,在一主多从的架构中还可以利用读写分离将读操作分配到从库中,减轻主库压力.但是在这种 ...
- zookeeper高可用集群搭建
前提:已经在master01配置好hadoop:在各个slave节点配置好hadoop和zookeeper: (该文是将zookeeper配置在各slave节点上的,其实也可以配置在各master上, ...
随机推荐
- wampserver apache 500 Internal Server Error解决办法
Internal Server ErrorThe server encountered an internal error or misconfiguration and was unable to ...
- Python3+Appium学习笔记02-环境配置(下)
配置所需软件及我当前使用的版本: 1)java jdk 1.8.0 2)android sdk 24.4.1 3)Python3 3.7.3 4)Appium-Python-Client 5)n ...
- 学习elasticsearch(一)linux环境搭建(3)——head插件安装
对于5.x的es,head插件不支持 ./elasticearch-plugin install [plugin_name]方式安装. 进入正文 1.首先确保你的机器安装了python,如果没有,请看 ...
- flyio 的请求封装
1.安装flyio.js npm install flyio --save-dev 2.在util创建一个fly.js用于封装 import Vue from 'vue' var Fly=requir ...
- solrcloud2
分片的原因 由于底层Lucene的限制,每个solr索引中包含的文档数不能超过231个,大约是21亿个.但是solr分片一般不是基于这个的原因,因为一般没有到这个峰值的之后,solr的各中性能问题就暴 ...
- Kettle安装和简单使用
Kettle安装和使用 安装 安装之前需要准备的环境为Java环境,需要提前配置好jdk 下载之后,解压即可使用. 使用 1.因为该工具主要是对数据库进行操作,所以需要提前将mysql的jar包放到l ...
- VSS使用技巧
理由很简单:迁出锁定!之所以强调这个,是因为这方面吃过的亏太多,我举几个例子:1.比如两个程序员增加了同一个功能,但是实现方法不同,比如甲:func1,乙 func2,两者代码也不一样第二个人在迁入代 ...
- 2018多校第十场 HDU 6430 (线段树合并)
题目链接:http://acm.hdu.edu.cn/showproblem.php?pid=6430 题意:一棵树上每个节点权值为v[i],每个节点的heard值是:以它为LCA的两个节点的GCD的 ...
- BZOJ 3065 带插入区间K小值 (替罪羊树套线段树)
毒瘤题.参考抄自博客:hzwer 第一次写替罪羊树,完全是照着题解写的,发现这玩意儿好强啊,不用旋转每次都重构还能nlognnlognnlogn. 还有外面二分和里面线段树的值域一样,那么r = mi ...
- HDU 6052 - To my boyfriend | 2017 Multi-University Training Contest 2
说实话不是很懂按题解怎么写,思路来源于 http://blog.csdn.net/calabash_boy/article/details/76272704?yyue=a21bo.50862.2018 ...