k8s binary deployment - master node installation
Download the Kubernetes server binaries
[root@hdss7-21 ~]# cd /opt/src
[root@hdss7-21 src]# wget https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
[root@hdss7-21 src]# tar -xf kubernetes-server-linux-amd64.tar.gz
[root@hdss7-21 src]# mv kubernetes /opt/release/kubernetes-v1.15.2
[root@hdss7-21 src]# ln -s /opt/release/kubernetes-v1.15.2 /opt/apps/kubernetes
[root@hdss7-21 src]# ll /opt/apps/kubernetes
lrwxrwxrwx 1 root root 31 Jan 6 12:59 /opt/apps/kubernetes -> /opt/release/kubernetes-v1.15.2
[root@hdss7-21 src]# cd /opt/apps/kubernetes
[root@hdss7-21 kubernetes]# rm -f kubernetes-src.tar.gz
[root@hdss7-21 kubernetes]# cd server/bin/
[root@hdss7-21 bin]# rm -f *.tar *_tag   # *.tar and *_tag are container image files, not needed here
[root@hdss7-21 bin]# ll
total 884636
-rwxr-xr-x 1 root root 43534816 Aug 5 18:01 apiextensions-apiserver
-rwxr-xr-x 1 root root 100548640 Aug 5 18:01 cloud-controller-manager
-rwxr-xr-x 1 root root 200648416 Aug 5 18:01 hyperkube
-rwxr-xr-x 1 root root 40182208 Aug 5 18:01 kubeadm
-rwxr-xr-x 1 root root 164501920 Aug 5 18:01 kube-apiserver
-rwxr-xr-x 1 root root 116397088 Aug 5 18:01 kube-controller-manager
-rwxr-xr-x 1 root root 42985504 Aug 5 18:01 kubectl
-rwxr-xr-x 1 root root 119616640 Aug 5 18:01 kubelet
-rwxr-xr-x 1 root root 36987488 Aug 5 18:01 kube-proxy
-rwxr-xr-x 1 root root 38786144 Aug 5 18:01 kube-scheduler
-rwxr-xr-x 1 root root 1648224 Aug 5 18:01 mounter
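Optionally, confirm the extracted binaries really are the intended release (a quick sanity check, run from the bin directory shown above):
[root@hdss7-21 bin]# ./kube-apiserver --version          # should print Kubernetes v1.15.2
[root@hdss7-21 bin]# ./kubectl version --client --short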
Distribute certificates
[root@hdss7-200 certs]# for i in 21 22;do echo hdss7-$i;ssh hdss7-$i "mkdir /opt/apps/kubernetes/server/bin/certs";scp apiserver-key.pem apiserver.pem ca-key.pem ca.pem client-key.pem client.pem hdss7-$i:/opt/apps/kubernetes/server/bin/certs/;done
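A quick optional check on each master that the six files arrived and that the server certificate is the expected one (assumes openssl is installed):
[root@hdss7-21 ~]# ls /opt/apps/kubernetes/server/bin/certs/
[root@hdss7-21 ~]# openssl x509 -in /opt/apps/kubernetes/server/bin/certs/apiserver.pem -noout -subject -dates   # check subject and validity window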
Configure apiserver audit logging
[root@hdss7-21 bin]# mkdir /opt/apps/kubernetes/conf
[root@hdss7-21 bin]# vim /opt/apps/kubernetes/conf/audit.yaml   # run :set paste after opening the file to avoid auto-indentation when pasting
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]

  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
Create the startup script
[root@hdss7-21 bin]# vim /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
#!/bin/bash
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/apps/kubernetes/server/bin/kube-apiserver \
--apiserver-count 2 \
--audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
--audit-policy-file ../../conf/audit.yaml \
--authorization-mode RBAC \
--client-ca-file ./certs/ca.pem \
--requestheader-client-ca-file ./certs/ca.pem \
--enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
--etcd-cafile ./certs/ca.pem \
--etcd-certfile ./certs/client.pem \
--etcd-keyfile ./certs/client-key.pem \
--etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
--service-account-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--service-node-port-range 3000-29999 \
--target-ram-mb=1024 \
--kubelet-client-certificate ./certs/client.pem \
--kubelet-client-key ./certs/client-key.pem \
--log-dir /data/logs/kubernetes/kube-apiserver \
--tls-cert-file ./certs/apiserver.pem \
--tls-private-key-file ./certs/apiserver-key.pem \
--v 2
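The startup script has to be executable, and the directory used for --audit-log-path / --log-dir (and for the supervisor stdout log below) has to exist, so create both before wiring the script into supervisor (an assumed prerequisite, mirroring the scheduler steps later):
[root@hdss7-21 bin]# chmod u+x /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver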
Configure the supervisor program
[root@hdss7-21 bin]# vim /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-7-21]
command=/opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
numprocs=1
directory=/opt/apps/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 23637, uptime 22:26:08
kube-apiserver-7-21 RUNNING pid 32591, uptime 0:05:37
Starting and stopping apiserver
[root@hdss7-21 ~]# supervisorctl start kube-apiserver-7-21
[root@hdss7-21 ~]# supervisorctl stop kube-apiserver-7-21
[root@hdss7-21 ~]# supervisorctl restart kube-apiserver-7-21
[root@hdss7-21 ~]# supervisorctl status kube-apiserver-7-21
Check the listening ports and process
[root@hdss7-21 bin]# netstat -lntp|grep api
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 32595/kube-apiserve
tcp6 0 0 :::6443 :::* LISTEN 32595/kube-apiserve
[root@hdss7-21 bin]# ps uax|grep kube-apiserver|grep -v grep
root 32591 0.0 0.0 115296 1476 ? S 20:17 0:00 /bin/bash /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
root 32595 3.0 2.3 402720 184892 ? Sl 20:17 0:16 /opt/apps/kubernetes/server/bin/kube-apiserver --apiserver-count 2 --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log --audit-policy-file ../../conf/audit.yaml --authorization-mode RBAC --client-ca-file ./certs/ca.pem --requestheader-client-ca-file ./certs/ca.pem --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --etcd-cafile ./certs/ca.pem --etcd-certfile ./certs/client.pem --etcd-keyfile ./certs/client-key.pem --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 --service-account-key-file ./certs/ca-key.pem --service-cluster-ip-range 192.168.0.0/16 --service-node-port-range 3000-29999 --target-ram-mb=1024 --kubelet-client-certificate ./certs/client.pem --kubelet-client-key ./certs/client-key.pem --log-dir /data/logs/kubernetes/kube-apiserver --tls-cert-file ./certs/apiserver.pem --tls-private-key-file ./certs/apiserver-key.pem --v 2
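As a simple liveness probe, hit the insecure local port that netstat shows listening on 127.0.0.1:8080; it should answer ok:
[root@hdss7-21 bin]# curl -s http://127.0.0.1:8080/healthz
The same apiserver installation is then repeated on hdss7-22; both instances are fronted by the nginx upstream configured next.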
Configure an L4 proxy for apiserver
[root@hdss7-11 ~]# yum install -y nginx
[root@hdss7-11 ~]# vim /etc/nginx/nginx.conf
# Append the following at the end of the file; a stream block can only be placed in the main context
# This is only a minimal nginx setup; in production, tune the configuration further
stream {
    log_format proxy '$time_local|$remote_addr|$upstream_addr|$protocol|$status|'
                     '$session_time|$upstream_connect_time|$bytes_sent|$bytes_received|'
                     '$upstream_bytes_sent|$upstream_bytes_received';

    upstream kube-apiserver {
        server 10.4.7.21:6443 max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443 max_fails=3 fail_timeout=30s;
    }

    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
        access_log /var/log/nginx/proxy.log proxy;
    }
}
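Validate the edited configuration before starting nginx so a typo does not leave the proxy down:
[root@hdss7-11 ~]# nginx -t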
[root@hdss7-11 ~]# systemctl start nginx; systemctl enable nginx
[root@hdss7-11 ~]# curl 127.0.0.1:7443   # try it a few times to exercise both backends
Client sent an HTTP request to an HTTPS server.
[root@hdss7-11 ~]# cat /var/log/nginx/proxy.log
06/Jan/2020:21:00:27 +0800|127.0.0.1|10.4.7.21:6443|TCP|200|0.001|0.000|76|78|78|76
06/Jan/2020:21:05:03 +0800|127.0.0.1|10.4.7.22:6443|TCP|200|0.020|0.019|76|78|78|76
06/Jan/2020:21:05:04 +0800|127.0.0.1|10.4.7.21:6443|TCP|200|0.001|0.001|76|78|78|76
Install keepalived
[root@hdss7-11 ~]# yum install -y keepalived
[root@hdss7-11 ~]# vim /etc/keepalived/check_port.sh   # port health-check script
#!/bin/bash
if [ $# -eq 1 ] && [[ $1 =~ ^[0-9]+$ ]];then
[ $(netstat -lntp|grep ":$1 " |wc -l) -eq 0 ] && echo "[ERROR] nginx may be not running!" && exit 1 || exit 0
else
echo "[ERROR] need one port!"
exit 1
fi
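Give the script a dry run to confirm it behaves as keepalived will call it (exit code 0 when something is listening on the port, 1 otherwise):
[root@hdss7-11 ~]# /etc/keepalived/check_port.sh 7443; echo $?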
[root@hdss7-11 ~]# chmod +x /etc/keepalived/check_port.sh
Configure the master node: /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 10.4.7.11
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
Configure the backup node: /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 10.4.7.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    virtual_router_id 251
    mcast_src_ip 10.4.7.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
Start keepalived
[root@hdss7-11 ~]# systemctl start keepalived ; systemctl enable keepalived
[root@hdss7-11 ~]# ip addr show ens32
2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:6d:b8:82 brd ff:ff:ff:ff:ff:ff
inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute ens32
valid_lft forever preferred_lft forever
inet 10.4.7.10/32 scope global ens32
valid_lft forever preferred_lft forever
......
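A minimal failover test (sketch): stopping nginx on hdss7-11 makes chk_nginx fail, its priority drops by 20 below the backup's 90, and the VIP should move to hdss7-12; whether it moves back automatically after nginx is restored depends on the preemption settings above.
[root@hdss7-11 ~]# systemctl stop nginx
[root@hdss7-12 ~]# ip addr show ens32 | grep 10.4.7.10   # the VIP should now be on the backup
[root@hdss7-11 ~]# systemctl start nginx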
Install controller-manager
Create the startup script
[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh
#!/bin/sh
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/apps/kubernetes/server/bin/kube-controller-manager \
--cluster-cidr 172.7.0.0/16 \
--leader-elect true \
--log-dir /data/logs/kubernetes/kube-controller-manager \
--master http://127.0.0.1:8080 \
--service-account-private-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--root-ca-file ./certs/ca.pem \
--v 2
[root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh
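The supervisor config below writes its stdout log under /data/logs/kubernetes/kube-controller-manager, so that directory has to exist first (an assumed prerequisite, analogous to the scheduler step further down):
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager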
Configure the supervisor program
[root@hdss7-21 ~]# vim /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager-7-21]
command=/opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/apps/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)

[root@hdss7-21 ~]# supervisorctl update
kube-controller-manager-7-21: stopped
kube-controller-manager-7-21: updated process group
[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21 RUNNING pid 23637, uptime 1 day, 0:16:54
kube-apiserver-7-21 RUNNING pid 32591, uptime 1:56:23
kube-controller-manager-7-21 RUNNING pid 33357, uptime 0:00:38
Install kube-scheduler
Create the startup script
[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh
#!/bin/sh
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/apps/kubernetes/server/bin/kube-scheduler \
--leader-elect \
--log-dir /data/logs/kubernetes/kube-scheduler \
--master http://127.0.0.1:8080 \
--v 2
[root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler
Configure the supervisor program
[root@hdss7-21 ~]# vim /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-7-21]
command=/opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh
numprocs=1
directory=/opt/apps/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false

[root@hdss7-21 ~]# supervisorctl update
kube-scheduler-7-21: stopped
kube-scheduler-7-21: updated process group
[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21 RUNNING pid 23637, uptime 1 day, 0:26:53
kube-apiserver-7-21 RUNNING pid 32591, uptime 2:06:22
kube-controller-manager-7-21 RUNNING pid 33357, uptime 0:10:37
kube-scheduler-7-21 RUNNING pid 33450, uptime 0:01:18
Check the control-plane node status
[root@hdss7-21 ~]# ln -s /opt/apps/kubernetes/server/bin/kubectl /usr/local/bin/
[root@hdss7-21 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}

[root@hdss7-22 ~]# ln -s /opt/apps/kubernetes/server/bin/kubectl /usr/local/bin/
[root@hdss7-22 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
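Finally, the highly available entry point can be probed through the VIP and nginx proxy set up earlier (a sketch; /version is normally readable without credentials thanks to the default system:public-info-viewer binding, assuming anonymous auth has not been disabled):
[root@hdss7-21 ~]# curl -sk https://10.4.7.10:7443/version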
从JavaScript到C Plus Plus 作为一个忠诚的Web开发者,JavaScript几乎是我这一年多以来的首选,不管是开发网站后端服务,还是开发跨端应用,我都会首选一个使用JavaScri ...