Project: https://github.com/easzlab/kubeasz

#: Configure Harbor first

#: Install Docker with a script
root@k8s-harbor1:~# vim docker_install.sh
#!/bin/bash
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt install -y docker-ce=5:18.09.9~3-0~ubuntu-bionic docker-ce-cli=5:18.09.9~3-0~ubuntu-bionic

root@k8s-harbor1:~# bash docker_install.sh

#: Configure a registry mirror (accelerator)
root@k8s-harbor1:~# sudo mkdir -p /etc/docker
root@k8s-harbor1:~# sudo tee /etc/docker/daemon.json <<-'EOF'
> {
> "registry-mirrors": ["https://5zw40ihv.mirror.aliyuncs.com"]
> }
> EOF
{
"registry-mirrors": ["https://5zw40ihv.mirror.aliyuncs.com"]
}
root@k8s-harbor1:~# sudo systemctl daemon-reload
root@k8s-harbor1:~# sudo systemctl restart docker

#: Install docker-compose
root@k8s-harbor1:~# apt install -y docker-compose

#: Download the Harbor package, extract it, and create a symlink
root@k8s-harbor1:/usr/local/src# ls
harbor-offline-installer-v1.7.5.tgz
root@k8s-harbor1:/usr/local/src# tar xf harbor-offline-installer-v1.7.5.tgz
root@k8s-harbor1:/usr/local/src# ln -sv /usr/local/src/harbor /usr/local/harbor

#: Prepare the certificates that the Harbor configuration will reference
root@k8s-harbor1:/usr/local/harbor# mkdir /usr/local/src/harbor/certs #: directory to hold the certificates
root@k8s-harbor1:/usr/local/harbor# cd /usr/local/src/harbor/certs
root@k8s-harbor1:/usr/local/src/harbor/certs# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key 2048 #: generate the private key
root@k8s-harbor1:/usr/local/src/harbor/certs# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt #: generate the self-signed certificate; the CN must match the hostname in the Harbor configuration. On Ubuntu this may fail with the following error
Can't load /root/.rnd into RNG
139879360623040:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/root/.rnd
#: Create the file it complains about and run the command again
root@k8s-harbor1:/usr/local/src/harbor/certs# touch /root/.rnd

#: Edit the Harbor configuration file
root@k8s-harbor1:/usr/local/src/harbor/certs# cd /usr/local/harbor
root@k8s-harbor1:/usr/local/harbor# vim harbor.cfg
hostname = harbor.magedu.net
ui_url_protocol = https #: the https protocol must be used here
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key #: point to the key and certificate generated above
harbor_admin_password = 123456 #: Harbor admin login password

#: Install Harbor
root@k8s-harbor1:/usr/local/harbor# ./install.sh

#: Test the installation
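#: A minimal sanity check (a sketch using the paths above) is that the certificate CN matches the configured hostname; the web UI should then answer at https://harbor.magedu.net
root@k8s-harbor1:/usr/local/harbor# openssl x509 -in /usr/local/src/harbor/certs/harbor-ca.crt -noout -subject -dates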
#: Configure master1 so it can push and pull images

#: Install Docker with the script first

root@k8s-master1:~# bash docker_install.sh

#: Create a directory under /etc/docker/certs.d named exactly after the Harbor hostname to hold the certificate; otherwise pushing and pulling images will fail
root@k8s-master1:~# mkdir /etc/docker/certs.d/harbor.magedu.net -p

#: Copy the Harbor certificate to the server that will push images
root@k8s-harbor1:~# scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.5.101:/etc/docker/certs.d/harbor.magedu.net

#: Restart Docker
root@k8s-master1:~# systemctl restart docker

#: Configure name resolution
root@k8s-master1:~# vim /etc/hosts
192.168.5.103 harbor.magedu.net

#: Log in to test
root@k8s-master1:~# docker login harbor.magedu.net

#: In the Harbor web UI create a project and set it to public
#: Pull a small image, retag it, and push it as a test
root@k8s-master1:~# docker pull alpine
root@k8s-master1:~# docker tag 961769676411 harbor.magedu.net/linux37/alpine:v1
root@k8s-master1:~# docker push harbor.magedu.net/linux37/alpine:v1

#: Configure master2 so it can push and pull images
#: Install Docker on master2 with the script
root@k8s-master2:~# bash docker_install.sh

#: Since master2 will also push images, manually copy the Docker login credentials to master2
root@k8s-master1:~# scp -r /root/.docker 192.168.5.102:/root

#: Use a script to copy master1's SSH public key to master2 and the etcd and node hosts for passwordless login
root@k8s-master1:~# vim scp.sh
#!/bin/bash
IP="
192.168.5.101
192.168.5.102
192.168.5.104
192.168.5.105
192.168.5.106
192.168.5.107
192.168.5.108
192.168.5.109
" for node in ${IP};do
sshpass -p centos ssh-copy-id ${node} -o StrictHostKeyChecking=no
if [ $? -eq 0 ];then
echo "${node} 秘钥拷贝完成"
else
echo "${node} 秘钥拷贝失败"
fi
done #:安装sshpass命令
root@k8s-master1:~# apt install sshpass #:在master1上生成秘钥对
root@k8s-master1:~# ssh-keygen #:执行脚本
root@k8s-master1:~# bash scp.sh #:再次修改脚本,将证书文件,认证文件,资源限制,拷贝到各主机
root@k8s-master1:~# vim scp.sh
#!/bin/bash
IP="
192.168.5.102
192.168.5.104
192.168.5.105
192.168.5.106
192.168.5.107
192.168.5.108
192.168.5.109
" for node in ${IP};do
# sshpass -p centos ssh-copy-id ${node} -o StrictHostKeyChecking=no
# if [ $? -eq 0 ];then
# echo "${node} 秘钥拷贝完成"
# else
# echo "${node} 秘钥拷贝失败"
# fi
scp docker_install.sh ${node}:/root
scp -r /etc/docker/certs.d ${node}:/etc/docker
scp /etc/hosts ${node}:/etc/
scp /etc/security/limits.conf ${node}:/etc/security/limits.conf
scp /etc/sysctl.conf ${node}:/etc/sysctl.conf
ssh ${node} "reboot"
echo "${node} 重启成功"
done
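#: Run the updated script (a quick usage note; it assumes the SSH keys pushed above are already in place)
root@k8s-master1:~# bash scp.sh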

#: Tune kernel and resource-limit parameters
root@k8s-master1:~# vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_recycle = 0
root@k8s-master1:~# vim /etc/security/limits.conf
*                soft    core            unlimited
*                hard    core            unlimited
*                soft    nproc           1000000
*                hard    nproc           1000000
*                soft    nofile          1000000
*                hard    nofile          1000000
*                soft    memlock         32000
*                hard    memlock         32000
*                soft    msgqueue        8192000
*                hard    msgqueue        8192000
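#: The sysctl changes can also be applied immediately without waiting for the reboot (standard sysctl usage)
root@k8s-master1:~# sysctl -p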

#: Reboot this host as well
root@k8s-master1:~# reboot
#: Configure haproxy + keepalived

#: Install haproxy and keepalived
root@k8s-ha1:~# apt install -y haproxy keepalived

#: Configure keepalived
root@k8s-ha1:~# find / -name keepalived.conf*
root@k8s-ha1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
root@k8s-ha1:~# vim /etc/keepalived/keepalived.conf
virtual_ipaddress {
192.168.5.248 dev eth0 label eth0:0
}
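#: For reference, a minimal vrrp_instance wrapping the address block above might look like this (a sketch; the interface name, router id, priority, and password are assumptions for this lab)
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.5.248 dev eth0 label eth0:0
    }
}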
#: Configure haproxy
root@k8s-ha1:~# vim /etc/haproxy/haproxy.cfg
listen k8s-api-6443
bind 192.168.5.248:6443
mode tcp
server 192.168.5.101 192.168.5.101:6443 check fall 3 rise 3 inter 3s
server 192.168.5.102 192.168.5.102:6443 check fall 3 rise 3 inter 3s

#: Restart the services
root@k8s-ha1:~# systemctl restart haproxy
root@k8s-ha1:~# systemctl restart keepalived

#: Enable the services at boot
root@k8s-ha1:~# systemctl enable haproxy
root@k8s-ha1:~# systemctl enable keepalived

#: Configure the second HA host the same way, then test
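#: A quick check on the active HA host (a sketch, assuming the VIP 192.168.5.248 from the keepalived configuration above): the VIP should be bound and haproxy should be listening on 6443
root@k8s-ha1:~# ip addr show eth0 | grep 192.168.5.248
root@k8s-ha1:~# ss -tnlp | grep 6443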
#: Configure ansible on master1

#: Install ansible
root@k8s-master1:/etc/ansible# apt install -y ansible

#: Clone the project; we use release 0.6.1: https://github.com/easzlab/kubeasz/tree/0.6.1
root@k8s-master1:/etc/ansible# cd /opt/
root@k8s-master1:/opt# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git

#: Move the files installed by the ansible package out of the way, then copy everything from the cloned repo into /etc/ansible
root@k8s-master1:/opt# mv /etc/ansible/* /tmp #: if nothing else is in there, these files can simply be deleted
root@k8s-master1:/opt# cp -rf kubeasz/* /etc/ansible/

#: If your version needs different startup parameters, the service templates can be edited in the following directory
root@k8s-master1:/etc/ansible/roles/kube-master/templates# cd /etc/ansible/roles/kube-master/templates/
root@k8s-master1:/etc/ansible/roles/kube-master/templates# ls
aggregator-proxy-csr.json.j2 kube-apiserver.service.j2 kube-controller-manager.service.j2 kube-scheduler.service.j2
basic-auth.csv.j2 kube-apiserver-v1.8.service.j2 kubernetes-csr.json.j2

#: Choose a deployment mode, single-node or multi-node; here we deploy multi-node
root@k8s-master1:/etc/ansible# cd /etc/ansible/
root@k8s-master1:/etc/ansible# ll example/
total 40
drwxr-xr-x 2 root root 4096 Oct 6 13:42 ./
drwxr-xr-x 10 root root 4096 Oct 6 13:42 ../
-rw-r--r-- 1 root root 2207 Oct 6 13:42 hosts.allinone.example
-rw-r--r-- 1 root root 2241 Oct 6 13:42 hosts.allinone.example.en
-rw-r--r-- 1 root root 2397 Oct 6 13:42 hosts.cloud.example
-rw-r--r-- 1 root root 2325 Oct 6 13:42 hosts.cloud.example.en
-rw-r--r-- 1 root root 2667 Oct 6 13:42 hosts.m-masters.example #: multi-node deployment, Chinese version
-rw-r--r-- 1 root root 2626 Oct 6 13:42 hosts.m-masters.example.en #: multi-node deployment, English version
-rw-r--r-- 1 root root 2226 Oct 6 13:42 hosts.s-master.example
-rw-r--r-- 1 root root 2258 Oct 6 13:42 hosts.s-master.example.en

#: Since this is a multi-node deployment, copy the multi-node hosts file into /etc/ansible
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
#: Deploy k8s with ansible

#: Follow the official documentation
https://github.com/easzlab/kubeasz/blob/0.6.1/docs/setup/00-planning_and_overall_intro.md

#: Update the apt package index
root@k8s-master1:/etc/ansible# apt-get update

#: Install python2.7
root@k8s-master1:/etc/ansible# apt-get install python2.7

#: Create a symlink
root@k8s-master2:~# ln -s /usr/bin/python2.7 /usr/bin/python

#: Install Python and create the symlink on the node and etcd hosts as well
#: Download the k8s binaries and extract them into /etc/ansible/bin
root@k8s-master1:/usr/local/src# tar xf k8s.1-13-5.tar.gz
root@k8s-master1:/usr/local/src# ls
bin k8s.1-13-5.tar.gz
root@k8s-master1:/usr/local/src# mv bin/* /etc/ansible/bin/

#: Test; it must print the current version
root@k8s-master1:/etc/ansible/bin# ./kube-apiserver --version
Kubernetes v1.13.5

#: Go back up a level and edit the hosts file as needed
root@k8s-master1:/etc/ansible/bin# cd ..
root@k8s-master1:/etc/ansible# vim hosts #: this is the multi-node example file copied and renamed above

[deploy]
192.168.5.101 NTP_ENABLED=no #: IP of this host

# For the etcd cluster provide NODE_NAME as below; the etcd cluster must have an odd number of members (1, 3, 5, 7 ...)
[etcd]
192.168.5.104 NODE_NAME=etcd1
192.168.5.105 NODE_NAME=etcd2
192.168.5.106 NODE_NAME=etcd3

[new-etcd] # reserved group for adding etcd nodes later
#192.168.1.x NODE_NAME=etcdx

[kube-master]
192.168.5.101

[new-master] # reserved group for adding master nodes later
192.168.5.102 #: left out on purpose, to test adding it later

[kube-node]
192.168.5.108

[new-node] # reserved group for adding node hosts later
192.168.5.109

K8S_VER="v1.13" #: pay attention to the version number
MASTER_IP="192.168.5.248" #: this is the VIP
KUBE_APISERVER="https://{{ MASTER_IP }}:6443" #: note the port is 6443
CLUSTER_NETWORK="calico" #: we use the calico network plugin
SERVICE_CIDR="10.20.0.0/16" #: service CIDR; make sure it does not overlap with the LAN
CLUSTER_CIDR="172.31.0.0/16" #: CIDR assigned to pods
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1" #: the first address of the service CIDR above
CLUSTER_DNS_SVC_IP="10.20.254.254" #: DNS service address; we use the last address of the service CIDR
CLUSTER_DNS_DOMAIN="linux37.local." #: DNS domain
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456" #: cluster password
bin_dir="/usr/bin" #: binaries are usually installed here; if you change it, other files have to be adjusted later

#: Test connectivity
root@k8s-master1:/etc/ansible# ansible all -m ping
#: Install step by step following the upstream docs
root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml

#: Before running step 02, if you want a newer etcd version, download it and extract it
root@k8s-master1:/opt# tar xf etcd-v3.3.15-linux-amd64.tar.gz

#: cd into the extracted directory and test it
root@k8s-master1:/opt/etcd-v3.3.15-linux-amd64# ./etcd --version

#: Then move the binaries into /etc/ansible/bin
root@k8s-master1:/opt/etcd-v3.3.15-linux-amd64# mv etcd* /etc/ansible/bin/

#: Run step 02
root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml

#: On any etcd host run the following to verify the etcd service (each endpoint must return successfully)
root@k8s-etcd1:~# export NODE_IPS="192.168.5.104 192.168.5.105 192.168.5.106"
root@k8s-etcd1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done
https://192.168.5.104:2379 is healthy: successfully committed proposal: took = 10.453066ms
https://192.168.5.105:2379 is healthy: successfully committed proposal: took = 11.483075ms
https://192.168.5.106:2379 is healthy: successfully committed proposal: took = 11.542092ms
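#: The cluster membership can also be listed from any etcd node (a sketch using the same certificate paths as above)
root@k8s-etcd1:~# ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://192.168.5.104:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem member list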
#: Docker is already installed, so step 03 can be skipped

#: Run step 04
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml

#: From another host check that port 6443 on the VIP is reachable
root@k8s-harbor1:~# telnet 192.168.5.248 6443

#: Now kubectl get node works on master1; check that the status is Ready
root@k8s-master1:/etc/ansible# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.5.101 Ready,SchedulingDisabled master 2m9s v1.13.5

#: Run step 05 (join the node hosts to the cluster)
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
TASK [kube-node : 开启kubelet 服务] *****************************************************************************************************
fatal: [192.168.5.108]: FAILED! => {"changed": true, "cmd": "systemctl daemon-reload && systemctl restart kubelet", "delta": "0:00:00.249926", "end": "2019-10-06 15:40:48.272879", "msg": "non-zero return code", "rc": 5, "start": "2019-10-06 15:40:48.022953", "stderr": "Failed to restart kubelet.service: Unit docker.service not found.", "stderr_lines": ["Failed to restart kubelet.service: Unit docker.service not found."], "stdout": "", "stdout_lines": []}

#: It fails because Docker is not installed on the node; install Docker on node1 and node2
root@k8s-node1:~# bash docker_install.sh

#: Run the playbook again
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml

#: Check
root@k8s-master1:/etc/ansible# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.5.101 Ready,SchedulingDisabled master 18m v1.13.5
192.168.5.108 Ready node 17s v1.13.5

#: Run step 06 (the network plugin)
#: Images need to be prepared first. Which images are needed depends on the calico version being installed (3.4 here; see the version defined in the role's defaults/main.yml)
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2 #: search for "image" in this file to find the images that must be downloaded
#: Once identified, download the matching calico 3.4.x release from GitHub
#: kubelet also needs a pause image
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
--pod-infra-container-image={{ SANDBOX_IMAGE }} \ #: it is referenced through a variable; find where this image is defined
root@k8s-master1:/etc/ansible# grep pod-infra-container-image* ./* -R
root@k8s-master1:/etc/ansible# grep mirrorgooglecontainers* ./* -R
./roles/kube-node/defaults/main.yml:SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1"

#: Now we know where it is defined; open the file
root@k8s-master1:/etc/ansible# vim ./roles/kube-node/defaults/main.yml
SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1" #:然后我们找一台主机,将这个镜像下下来,然后修改tag号,传到harbor
root@k8s-node1:~# docker pull mirrorgooglecontainers/pause-amd64:3.1
root@k8s-node1:~# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor.magedu.net/linux37/pause-amd64:3.1
root@k8s-node1:~# docker push harbor.magedu.net/linux37/pause-amd64:3.1

#: On the master host change the image address
root@k8s-master1:/etc/ansible# vim ./roles/kube-node/defaults/main.yml
SANDBOX_IMAGE: "harbor.magedu.net/linux37/pause-amd64:3.1" #:然后重新执行一下
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml #:在node节点查看
root@k8s-node1:~# ps aux |grep kubelet
--pod-infra-container-image=harbor.magedu.net/linux37/pause-amd64:3.1

#: Change it on the master as well
root@k8s-master1:/etc/ansible# vim /etc/systemd/system/kubelet.service
--pod-infra-container-image=harbor.magedu.net/linux37/pause-amd64:3.1 \
--max-pods=110 \ #: raise this in production; it limits how many pods this node can run

#: Then restart kubelet
root@k8s-master1:/etc/ansible# systemctl daemon-reload
root@k8s-master1:/etc/ansible# systemctl restart kubelet

#: Check
root@k8s-master1:/etc/ansible# kubectl get nodes

#: Continue preparing the network (calico) images
#: Upload the downloaded calico release archive to the server and extract it; it contains three images
root@k8s-master1:/opt# tar xf release-v3.4.4_\(1\).tgz
root@k8s-master1:/opt# cd release-v3.4.4/
root@k8s-master1:/opt/release-v3.4.4# cd images/

#: Load the cni image first, retag it, and push it to Harbor
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-cni.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag f5e5bae3eb87 harbor.magedu.net/linux37/calico-cni:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-cni:v3.4.4
#: Then change the image address
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
- name: install-cni
image: harbor.magedu.net/linux37/calico-cni:v3.4.4

#: Load the node image, retag it, and push it to Harbor
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-node.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag a8dbf15bbd6f harbor.magedu.net/linux37/calico-node:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-node:v3.4.4
#: Then change the image address
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
- name: calico-node
image: harbor.magedu.net/linux37/calico-node:v3.4.4

#: Load the kube-controllers image, retag it, and push it to Harbor
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-kube-controllers.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag 0030ff291350 harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4
#: Then change the image address
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
containers:
- name: calico-kube-controllers
image: harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4

#: Run step 06
root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml

#: Check
root@k8s-master1:/etc/ansible# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.108 | node-to-node mesh | up | 08:57:09 | Established |
+---------------+-------------------+-------+----------+-------------+
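#: The calico pods themselves can also be checked from the master (a quick sketch)
root@k8s-master1:/etc/ansible# kubectl get pod -n kube-system -o wide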

#: Add a node and a master

#: First add the new node to the hosts file

root@k8s-master1:/etc/ansible# vim hosts
[new-node] # reserved group for adding node hosts later
192.168.5.109

#: Run the add-node playbook
root@k8s-master1:/etc/ansible# ansible-playbook 20.addnode.yml

#: Check
root@k8s-master1:/etc/ansible# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.5.101 Ready,SchedulingDisabled master 93m v1.13.5
192.168.5.108 Ready node 75m v1.13.5
192.168.5.109 Ready node 62s v1.13.5

#: The Docker version it installs does not match ours, so replace the binaries
root@k8s-master1:/etc/ansible# docker version
Client:
Version: 18.09.9
API version: 1.39
Go version: go1.11.13
Git commit: 039a7df9ba
Built: Wed Sep 4 16:57:28 2019
OS/Arch: linux/amd64
Experimental: false

Server: Docker Engine - Community
Engine:
Version: 18.09.9
API version: 1.39 (minimum version 1.12)
Go version: go1.11.13
Git commit: 039a7df
Built: Wed Sep 4 16:19:38 2019
OS/Arch: linux/amd64
Experimental: false

root@k8s-master1:/etc/ansible# cp /usr/bin/docker* /etc/ansible/bin/
root@k8s-master1:/etc/ansible# cp /usr/bin/containerd* /etc/ansible/bin/

#: Running the add again would fail because the node has already been added, so adjust the hosts file and re-run
root@k8s-master1:/etc/ansible# vim hosts
[new-node] # reserved group for adding node hosts later
192.168.5.109

#: Run the playbook again
root@k8s-master1:/etc/ansible# ansible-playbook 20.addnode.yml

#: Check
root@k8s-master1:/etc/ansible# kubectl get nodes

#: Check on the node
root@k8s-node2:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.101 | node-to-node mesh | up | 09:13:07 | Established |
| 192.168.5.108 | node-to-node mesh | up | 09:13:07 | Established |
+---------------+-------------------+-------+----------+-------------+
#: Add a master
#: Add the new master to the hosts file
root@k8s-master1:/etc/ansible# vim hosts
[new-master] # reserved group for adding master nodes later
192.168.5.102

#: Comment out the lb hosts section in the playbook
root@k8s-master1:/etc/ansible# vim 21.addmaster.yml
# reconfigure and restart the haproxy service
#- hosts: lb
# roles:
# - lb

#: Run the add-master playbook
root@k8s-master1:/etc/ansible# ansible-playbook 21.addmaster.yml

#: Verify
root@k8s-master1:/etc/ansible# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.5.101 Ready,SchedulingDisabled master 113m v1.13.5
192.168.5.102 Ready,SchedulingDisabled master 5m58s v1.13.5
192.168.5.108 Ready node 95m v1.13.5
192.168.5.109 Ready node 20m v1.13.5

#: Check on the node (every peer must show Established)
root@k8s-node1:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.101 | node-to-node mesh | up | 08:57:10 | Established |
| 192.168.5.109 | node-to-node mesh | up | 09:13:08 | Established |
| 192.168.5.102 | node-to-node mesh | up | 09:22:28 | Established |
+---------------+-------------------+-------+----------+-------------+

#: Run a few test pods to check the network
root@k8s-master1:/etc/ansible# kubectl run net-test --image=alpine --replicas=4 sleep 36000
root@k8s-master1:/etc/ansible# kubectl get pod
NAME READY STATUS RESTARTS AGE
net-test-7d5ddd7497-9zmfs 1/1 Running 0 62s
net-test-7d5ddd7497-l2b28 1/1 Running 0 62s
net-test-7d5ddd7497-strk6 1/1 Running 0 62s
net-test-7d5ddd7497-vwsh7 1/1 Running 0 62s

#: Check the pod IP addresses
root@k8s-master1:/etc/ansible# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
net-test-7d5ddd7497-9zmfs 1/1 Running 0 112s 172.31.58.65 192.168.5.108 <none> <none>
net-test-7d5ddd7497-l2b28 1/1 Running 0 112s 172.31.58.66 192.168.5.108 <none> <none>
net-test-7d5ddd7497-strk6 1/1 Running 0 112s 172.31.13.129 192.168.5.109 <none> <none>
net-test-7d5ddd7497-vwsh7 1/1 Running 0 112s 172.31.13.130 192.168.5.109 <none> <none>

#: Exec into a pod and test connectivity
root@k8s-master1:/etc/ansible# kubectl exec -it net-test-7d5ddd7497-9zmfs sh
/ # ping 172.31.13.129
PING 172.31.13.129 (172.31.13.129): 56 data bytes
64 bytes from 172.31.13.129: seq=0 ttl=62 time=2.312 ms
^C
--- 172.31.13.129 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 2.312/2.312/2.312 ms
/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=41.006 ms
^C
--- 223.6.6.6 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 41.006/41.006/41.006 ms

#: Set up DNS

#: Upload the downloaded DNS images into the ansible manifests directory reserved for add-on software
root@k8s-master1:/etc/ansible/manifests# cd /etc/ansible/manifests/

#: Create a dns directory
root@k8s-master1:/etc/ansible/manifests# mkdir dns

#: Two DNS implementations may be covered later, so create a sub-directory and put the files there
root@k8s-master1:/etc/ansible/manifests# cd dns
root@k8s-master1:/etc/ansible/manifests/dns# mkdir kube-dns
root@k8s-master1:/etc/ansible/manifests/dns# cd kube-dns/
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# ll
total 136996
drwxr-xr-x 3 root root 4096 Oct 6 21:34 ./
drwxr-xr-x 3 root root 4096 Oct 6 21:33 ../
-rw-r--r-- 1 root root 3983872 Oct 6 21:34 busybox-online.tar.gz
-rw-r--r-- 1 root root 277 Oct 6 21:34 busybox.yaml
drwxr-xr-x 2 root root 4096 Oct 6 21:34 heapster/
-rw-r--r-- 1 root root 41687040 Oct 6 21:34 k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 51441152 Oct 6 21:34 k8s-dns-kube-dns-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 43140608 Oct 6 21:34 k8s-dns-sidecar-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 6305 Oct 6 21:34 kube-dns.yaml

#: We use the kube-dns.yaml file and the images in this directory
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
clusterIP: 10.20.254.254 #: this is the DNS address; it must match the DNS address set in the ansible hosts file
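#: A quick cross-check against the ansible hosts file (a sketch)
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# grep -E "CLUSTER_DNS_SVC_IP|CLUSTER_DNS_DOMAIN" /etc/ansible/hosts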

#: Then change the images referenced in this file
#: Load the images from this directory and push them to Harbor
#: Load the first one and push it to Harbor
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 82f954458b31 harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13
#: Change the image address in the file
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
      containers:
      - name: kubedns
        image: harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13

#: Load the second one and push it to Harbor
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 7b15476a7228 harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
#: Change the image address in the file

root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
      - name: dnsmasq
        image: harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13

#: Load the third one and push it to Harbor
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 333fb0833870 harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13

#: Change the image address in the file
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
      - name: sidecar
        image: harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13
          limits:
            memory: 256Mi #: adjust this as well; in production it can be set to 4Gi
        args:
        - --domain=linux37.local #: must match the DNS domain set in the ansible hosts file
        - --server=/linux37.local/127.0.0.1#10053 #: must match the DNS domain set in the ansible hosts file
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.linux37.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.linux37.local,5,SRV

#: Create the DNS service
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f kube-dns.yaml

#: Verify
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -n kube-system

#: Use busybox to test DNS; load the image first and push it to Harbor
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i busybox-online.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 747e1d7f6665 harbor.magedu.net/linux37/busybox:latest
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/busybox:latest
#: Change the image address
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim busybox.yaml
spec:
  containers:
  - image: harbor.magedu.net/linux37/busybox:latest
#: Create the busybox pod
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f busybox.yaml

#: Check
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod

#: Test DNS resolution
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get service --all-namespaces
NAMESPACE     NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes   ClusterIP   10.20.0.1       <none>        443/TCP         6h51m
kube-system   kube-dns     ClusterIP   10.20.254.254   <none>        53/UDP,53/TCP   17m
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl exec busybox nslookup kube-dns.kube-system.svc.linux37.local
Server:    10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux37.local

Name:      kube-dns.kube-system.svc.linux37.local
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux37.local
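#: The kube-dns service endpoints can also be checked to confirm the pods are actually backing it (a quick sketch)
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get endpoints kube-dns -n kube-system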

#: Deploy the dashboard

#: Download the dashboard package and extract it
root@k8s-master1:~# cd /etc/ansible/manifests/dashboard/

#: Create a directory named after the dashboard version and move the archive into it
root@k8s-master1:/etc/ansible/manifests/dashboard# mkdir 1.10.1
root@k8s-master1:/etc/ansible/manifests/dashboard# mv kubernetes-dashboard-amd64-v1.10.1.tar.gz 1.10.1/
root@k8s-master1:/etc/ansible/manifests/dashboard# cd 1.10.1/
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# tar xf kubernetes-dashboard-amd64-v1.10.1.tar.gz
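#: From here the image is handled like the others above: load it, retag it for Harbor, and push it (a sketch; the exact source image name should be confirmed with docker images after loading)
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker tag k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1 harbor.magedu.net/linux37/kubernetes-dashboard-amd64:v1.10.1
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker push harbor.magedu.net/linux37/kubernetes-dashboard-amd64:v1.10.1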
