k8s集群———单master节点2node节点
#部署node节点
1,将kubelet-bootstrap用户绑定到系统集群角色中(颁发证书的最小权限)
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap ,master节点执行
root@k8s-master: ~/k8s ::
$ cat kubeconfig.sh
#!/bin/bash
# Generate the two kubeconfig files the node components authenticate with:
#   bootstrap.kubeconfig  - token-based credentials for kubelet TLS bootstrapping
#   kube-proxy.kubeconfig - client-certificate credentials for kube-proxy
# Usage: bash kubeconfig.sh <APISERVER_IP> <SSL_DIR>
# Files are written to the current working directory.

# How the TLS Bootstrapping token was originally generated:
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

# Matching token.csv consumed by kube-apiserver (already created earlier):
#cat > token.csv <<EOF
#${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
#EOF
# NOTE(review): the user-id field (10001) was lost in transcription — confirm
# against the token.csv actually deployed on the master.

#----------------------

# apiserver node IP (first positional argument)
APISERVER=$1
# directory containing ca.pem / kube-proxy*.pem (second positional argument)
SSL_DIR=$2

export KUBE_APISERVER="https://$APISERVER:6443"

# --- create the kubelet bootstrapping kubeconfig ---

# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority="$SSL_DIR/ca.pem" \
  --embed-certs=true \
  --server="${KUBE_APISERVER}" \
  --kubeconfig=bootstrap.kubeconfig

# set client credentials (token only; the CSR flow issues the real certs later)
kubectl config set-credentials kubelet-bootstrap \
  --token="${BOOTSTRAP_TOKEN}" \
  --kubeconfig=bootstrap.kubeconfig

# set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# select the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# --- create the kube-proxy kubeconfig ---

kubectl config set-cluster kubernetes \
  --certificate-authority="$SSL_DIR/ca.pem" \
  --embed-certs=true \
  --server="${KUBE_APISERVER}" \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate="$SSL_DIR/kube-proxy.pem" \
  --client-key="$SSL_DIR/kube-proxy-key.pem" \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
root@k8s-master: ~/k8s ::
$ ###执行生成两个文件bootstrap.kubeconfig,kube-proxy.kubeconfig
root@k8s-master: ~/k8s ::
$ bash kubeconfig.sh 192.168.1.63 /root/k8s/k8s-cert/
root@k8s-master: ~/k8s/k8s-cert ::
$ ls
admin.csr admin.pem ca.csr ca.pem kube-proxy-csr.json kube-proxy.pem server-key.pem
admin-csr.json bootstrap.kubeconfig ca-csr.json k8s-cert.sh kube-proxy-key.pem server.csr server.pem
admin-key.pem ca-config.json ca-key.pem kube-proxy.csr kube-proxy.kubeconfig server-csr.json
root@k8s-master: ~/k8s/k8s-cert ::
$ clear
root@k8s-master: ~/k8s/k8s-cert ::
$
#不要复制粘贴。。。
root@k8s-master: ~/k8s/k8s-cert ::
$ cat bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR2akNDQXFhZ0F3SUJBZ0lVWDJManhCcWlvUzlQczBGc0U4SlJGSnBQcHZBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEU1TURNeE9ERTFNekl3TUZvWERUSTBNRE14TmpFMU16SXdNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RERBSwpCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBc3NnUTZZNTFnaDYwRFFtQXBhY2QKbllRU2NLZFRHMWRaNU0zRGxPMVNVamk2aUExVHpSWlEwZ0VBWE0yWDRLSHgvcGNEN3hJZTJrQXB0Vi92aWo4WAo4Q1RCNEkzeGVQVFJHQVljWmFGNnBWaHNETExqZjBPd1ZYTUtvZi9uNi9tV1JVQXhsd3AwM2d4aExtMGp0L0hlCjB1UDlYYXhnV3EyNFI2emF6aFloM3VNaUZTdWYzWHdCV3VrTXV5YlAwSzJHczJrOU5ERFN6dUJMVU1ZUUVLWUsKcmFDVjhUM1psdk5DbzJWbDQxcFF4Wm80RDBLRUZWQWUwOXNuZkRwaUFCeUpGZlArQWs5M2xVOHdBcUVUeEpXNApYWksxQUNMeTN4WmlDYldCc0dBYWhlK1AxUmNJNjZJTldwZHV2TGFWOWVhcTRxOCtvbHBJbkQ0Z3gyQ1BVWjNKCmd3SURBUUFCbzJZd1pEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWQKQmdOVkhRNEVGZ1FVaEpIL1NmdTBzMk0xdjNhalBlVlZkSjFGVXA4d0h3WURWUjBqQkJnd0ZvQVVoSkgvU2Z1MApzMk0xdjNhalBlVlZkSjFGVXA4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCUTh1b05naC9IWnMyOXJqbTlxCjRtSER0M0puVnpNbWwzUEo0ZUphTnhlKytxUWRxQy9mVjI2Sk05NC9tbjQ3RDdNSWdFMTFlbFI1WjlCc2lWQlYKeTZha0Vnck9FKzduemtQUHhraThjQ0hSOFlrSzFLME52T3A3K3ZoYmYvWnlZcDRNUFhkZ2VEanY5VUIyQ3BTYwpEMFJvVkNsYUtEZjE0bjQ5eXJHU3NiVWVxdjBUYWJQS0VHUlZvN0t2dDMydTYvVWNOZGxhcHUvSW1USEs5clJpClFNaHRHbkpmMDZiaXlkVjkxYzJvTEdzRXNsbG9PVUhpUklYNUJySDlTS2xhd0JkZXg3RTJrUHdlRUxXTGZKbjQKTi9HN0VUc0xZalZLcXdxYWhNRHphcUdKdC9ML1VFakxCNk95aTRvd1ZldEhqcFIwQ0h3dDlhazNJMjg5QUpwZgpZNmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://192.168.1.63:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubelet-bootstrap
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
user:
token: 0fb61c46f8991b718eb38d27b605b008
###这里一定要有token ########
2,将生成的认证文件复制到两个node节点
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.65:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.1.66:/opt/kubernetes/cfg/
scp kubelet kube-proxy root@192.168.1.65:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.1.66:/opt/kubernetes/bin/ ,node执行脚本
root@k8s-node01: ~ ::
$ cat kubelet.sh
#!/bin/bash
# Install kubelet config files and systemd unit on a node, then start kubelet.
# Usage: bash kubelet.sh <NODE_IP> [DNS_SERVER_IP]
# Side effects: writes /opt/kubernetes/cfg/kubelet, /opt/kubernetes/cfg/kubelet.config,
# /usr/lib/systemd/system/kubelet.service, and (re)starts the kubelet service.

# node IP address (first positional argument)
NODE_ADDRESS=$1
# cluster DNS service IP (second positional argument, defaults to 10.0.0.2)
DNS_SERVER_IP=${2:-"10.0.0.2"}

# kubelet command-line options, sourced by the systemd unit below.
# NOTE(review): the log verbosity value was lost in transcription; 4 is the
# value conventionally used with this script — confirm.
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

# KubeletConfiguration file referenced by --config above.
# NOTE(review): port/readOnlyPort values were lost in transcription; 10250 and
# 10255 are the kubelet defaults — confirm against the deployed file.
cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

# systemd unit for kubelet; requires docker to be running first.
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
root@k8s-node01: ~ ::
执行脚本
$ bash kubelet.sh 192.168.1.65 #proxy脚本
root@k8s-node01: ~ ::
$ cat proxy.sh
#!/bin/bash
# Install kube-proxy config and systemd unit on a node, then start kube-proxy.
# Usage: bash proxy.sh <NODE_IP>
# Side effects: writes /opt/kubernetes/cfg/kube-proxy and
# /usr/lib/systemd/system/kube-proxy.service, and (re)starts kube-proxy.

# node IP address (first positional argument)
NODE_ADDRESS=$1

# kube-proxy command-line options, sourced by the systemd unit below.
# NOTE(review): the --v and --cluster-cidr prefix-length values were lost in
# transcription; 4 and /24 are the values conventionally used here — confirm.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

# systemd unit for kube-proxy ("-" prefix: ignore a missing EnvironmentFile).
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
root@k8s-node01: ~ ::
$
执行脚本
bash proxy.sh 192.168.1.65 ,master节点执行,
(查看node节点发给master的请求)
$ kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-M6k2DlnOW4FIWGF7v4V97AyrmPBKSsIpzNj_BtKHZGE 3h53m kubelet-bootstrap Pending
node-csr-RyWUEYiuwDYFcu7fegbHl-XmUpc3diJtdHowU9LUJyU 3h39m kubelet-bootstrap Pending
root@k8s-master: ~ ::
$
(同意node节点加入请求,命令,后边加上节点name)
kubectl certificate approve node-csr-CB7wV3ITot1QnhMPl2psUT-aAu2mEsXeW-8a9VelNfg (在master查看加入集群节点)
root@k8s-master: ~ ::
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h23m v1.13.4
192.168.1.66 Ready <none> 3h11m v1.13.4
root@k8s-master: ~ ::
$ =============
node2节点操作
root@k8s-node01: ~ ::
$ scp -r /opt/kubernetes/ root@192.168.1.66:/opt/ $ cat kubelet KUBELET_OPTS="--logtostderr=true \
--v= \
--hostname-override=192.168.1.66 \ #######改成当前nodeip地址
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0" root@k8s-node01: /opt/kubernetes/cfg ::
$ root@k8s-node01: /opt/kubernetes/cfg ::
$ cat kubelet.config kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.66 #######改成当前nodeip地址
port:
readOnlyPort:
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
anonymous:
enabled: true
root@k8s-node01: /opt/kubernetes/cfg ::
$ root@k8s-node01: /opt/kubernetes/cfg ::
$ cat kube-proxy KUBE_PROXY_OPTS="--logtostderr=true \
--v= \
--hostname-override=192.168.1.66 \ #######改成当前nodeip地址
--cluster-cidr=10.0.0.0/ \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig" root@k8s-node01: /opt/kubernetes/cfg ::
$ #####
需要将192.168.1.65的ssl文件全部删掉,因为要生成66这个ip的ssl文件
rm /opt/kubernetes/ssl/* systemctl start kubelet
systemctl start kube-proxy 根node1一样在master执行以下,将node2认证请求同意,并加入集群
3,master节点执行,
(查看node节点发给master的请求)
$ kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-M6k2DlnOW4FIWGF7v4V97AyrmPBKSsIpzNj_BtKHZGE 3h53m kubelet-bootstrap Pending
node-csr-RyWUEYiuwDYFcu7fegbHl-XmUpc3diJtdHowU9LUJyU 3h39m kubelet-bootstrap Pending
root@k8s-master: ~ 20:57:28
$
(同意node节点加入请求,命令,后边加上节点name)
kubectl certificate approve node-csr-CB7wV3ITot1QnhMPl2psUT-aAu2mEsXeW-8a9VelNfg (在master查看加入集群节点)
root@k8s-master: ~ 20:59:30
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h23m v1.13.4
192.168.1.66 Ready <none> 3h11m v1.13.4
root@k8s-master: ~ 20:59:37
$ ###########
查看创建pod
root@k8s-master: ~ 21:14:53
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
java-84767655bc-24mr6 0/1 Completed 3 3m21s
nginx-7cdbd8cdc9-56xwp 1/1 Running 0 3h21m
nginx-7cdbd8cdc9-m94rk 1/1 Running 0 3h21m
nginx-7cdbd8cdc9-qd72h 1/1 Running 0 3h22m
root@k8s-master: ~ 21:14:55
$ 查看集群节点
root@k8s-master: ~ 21:15:44
$ kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.65 Ready <none> 3h40m v1.13.4
192.168.1.66 Ready <none> 3h28m v1.13.4
root@k8s-master: ~ 21:15:50
$ 查看服务运行在哪个节点
root@k8s-master: ~ 21:16:22
$ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
java-84767655bc-24mr6 0/1 CrashLoopBackOff 4 4m55s 172.17.88.3 192.168.1.66 <none> <none>
nginx-7cdbd8cdc9-56xwp 1/1 Running 0 3h22m 172.17.88.2 192.168.1.66 <none> <none>
nginx-7cdbd8cdc9-m94rk 1/1 Running 0 3h22m 172.17.75.3 192.168.1.65 <none> <none>
nginx-7cdbd8cdc9-qd72h 1/1 Running 0 3h23m 172.17.75.2 192.168.1.65 <none> <none>
root@k8s-master: ~ 21:16:29
$ #############运行一个测试实例
创建一个测试示例
kubectl create deployment nginx --image=nginx 添加三个副本
kubectl scale deployment nginx --replicas=3 启动副本添加监听,访问端口随机生成
kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort 授权查看pod日志
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
k8s集群———单master节点2node节点的更多相关文章
- 记录一个奇葩的问题:k8s集群中master节点上部署一个单节点的nacos,导致master节点状态不在线
情况详细描述; k8s集群,一台master,两台worker 在master节点上部署一个单节点的nacos,导致master节点状态不在线(不论是否修改nacos的默认端口号都会导致master节 ...
- k8s集群之master节点部署
apiserver的部署 api-server的部署脚本 [root@mast-1 k8s]# cat apiserver.sh #!/bin/bash MASTER_ADDRESS=$1 主节点IP ...
- k8s集群添加新得node节点
服务端操作: 方法一: 获取master的join token kubeadm token create --print-join-command 重新加入节点 kubeadm join 192.16 ...
- K8S集群etcd备份与恢复
参考链接: K8S集群多master:Etcd v3备份与恢复 K8S集群单master:Kubernetes Etcd 数据备份与恢复 ETCD系列之一:简介:https://developer.a ...
- 【K8S】基于单Master节点安装K8S集群
写在前面 最近在研究K8S,今天就输出部分研究成果吧,后续也会持续更新. 集群规划 IP 主机名 节点 操作系统版本 192.168.175.101 binghe101 Master CentOS 8 ...
- hyper-v虚拟机上的centos多节点k8s集群实践
之前体验了minikube,掉深坑里至今还没有爬出来,玩单节点用minikube够了, 但傻瓜试的安装让人对k8s理解不是很深刻(坑),而且多节点好像有什么奇怪的问题 所以我这次要用两个虚拟机来模拟k ...
- 使用Rancher Server部署本地多节点K8S集群
当我第一次开始我的Kubernetes之旅时,我一直在寻找一种设置本地部署环境的方式.很多人常常会使用minikube或microk8s,这两者非常适合新手在单节点集群环境下进行操作.但当我已经了解了 ...
- k8s集群节点更换ip 或者 k8s集群添加新节点
1.需求情景:机房网络调整,突然要回收我k8s集群上一台node节点机器的ip,并调予新的ip到这台机器上,所以有了k8s集群节点更换ip一说:同时,k8s集群节点更换ip也相当于k8s集群添加新节点 ...
- 使用kind快速搭建本地k8s集群
Kind是什么? k8s集群的组成比较复杂,如果纯手工部署的话易出错且时间成本高.而本文介绍的Kind工具,能够快速的建立起可用的k8s集群,降低初学者的学习门槛. Kind是Kubernetes I ...
随机推荐
- 自定义View系列教程02--onMeasure源码详尽分析
深入探讨Android异步精髓Handler 站在源码的肩膀上全解Scroller工作机制 Android多分辨率适配框架(1)- 核心基础 Android多分辨率适配框架(2)- 原理剖析 Andr ...
- 洛谷P1910 L国的战斗之间谍
//二维费用01背包 #include<bits/stdc++.h> using namespace std; ; ; ; int v1[maxn],v2[maxn],w[maxn],n, ...
- 云原生生态周报 Vol. 5 | etcd性能知多少
业界要闻 1 Azure Red Hat OpenShift已经GA.在刚刚结束的Red Hat Summit 2019上,Azure Red Hat OpenShift正式宣布GA,这是一个微软和红 ...
- mongodb Helper
/// <summary> /// mongoDBHelper访问助手 /// </summary> public class mongoDBHelper { /// < ...
- LRJ-Example-06-17-Uva10562
main() 函数中的这两行 fgets(buf[0], maxn, stdin); sscanf(buf[0], "%d", &T); 不能简单替换为 scanf(&qu ...
- H3C 代理ARP
- 利用 jquery 获取某个元素下的所有图片并改变其属性
HTML代码 <div id="mochu"> <p>内容....<./p> <p><img src="xxxx.p ...
- C#循环语句练习(二)
1.求1!+2!+...+n! 所有阶乘的和 2.求100以内的质数 (1)第一种做法 (2)另一种做法 3. 总数=幼兔+小兔+成兔成兔=上个月的小兔+上个月的成兔小兔=上个月的幼兔幼兔=这个月的成 ...
- Python--day48--今日内容
- python 字符串方法isdigit()
python isdigit() 方法检测字符串是否只有数字组成. 语法: isdigit()方法语法: str.isdigit() 参数:无 返回值: 如果字符串中只含有数字则返回True,否则返回 ...