# Self-signed certificate generation script
root@k8s-master: ~/k8s/k8s-cert ::
$ cat k8s-cert.sh
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF

cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size":
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------
#hosts should list the SLB node IPs, the master node IPs, and the node IPs below (add a few spare node IPs for later expansion)
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.1.63",
"192.168.1.64",
"192.168.1.65",
"192.168.1.66",
"192.168.1.60",
"192.168.1.61",
"192.168.1.62",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size":
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size":
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size":
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
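After the script finishes, you can spot-check the generated certificates before copying them anywhere; a minimal sketch with openssl (cfssl-certinfo -cert server.pem works as well), assuming the .pem files are in the current directory:

# Show the SANs and validity period of the apiserver certificate
openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"
openssl x509 -in server.pem -noout -dates
# Confirm the CA subject matches ca-csr.json
openssl x509 -in ca.pem -noout -subject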
root@k8s-master: ~/k8s/k8s-cert ::
$
1. kube-apiserver
2. kube-controller-manager
3. kube-scheduler
Each component follows the same pattern: config file -> systemd-managed unit -> start the service

==================================================kube-apiserver==================================================

# Create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
mv token.csv /opt/kubernetes/cfg
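The columns in token.csv are: token, user name, user UID, and group. If you prefer a random token instead of the fixed example above, a quick sketch using the same file layout as in this guide:

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/kubernetes/cfg/token.csv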

Deploy kube-apiserver
(1) Create the directories for the kube-apiserver files
root@k8s-master: ~/soft ::
$ mkdir /opt/kubernetes/{bin,ssl,cfg} -p
(2) Extract the tarball and copy the core binaries (kube-apiserver, kube-controller-manager, kube-scheduler) to /opt/kubernetes/bin
root@k8s-master: ~/soft ::
$ tar zxvf kubernetes-server-linux-amd64.tar.gz
root@k8s-master: ~ ::
$ cd /root/soft/kubernetes/server/bin
root@k8s-master: ~/soft/kubernetes/server/bin ::
$ cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/
(3) The kube-apiserver configuration script
root@k8s-master: ~/k8s ::
$ cat apiserver.sh
#!/bin/bash
#Master node IP address, passed in as the first argument
MASTER_ADDRESS=$1
#IP addresses of all etcd nodes, passed in as the second argument
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
#true: logs go to /var/log/messages by default; the alternative is false, together with --log-dir=/opt/kubernetes/logs to write logs to a dedicated directory
KUBE_APISERVER_OPTS="--logtostderr=true \\
#Log verbosity level; the higher the value, the more verbose the output
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
#Virtual IP range for Services (cluster-internal load balancing); the next line is the NodePort port range
--service-cluster-ip-range=10.0.0.0/24 \\
--service-node-port-range=30000-50000 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
#Bootstrap token used for authentication between kubelet and apiserver
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
#The apiserver's self-signed SSL certificates
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
#Below are etcd's self-signed SSL certificates, since etcd is also accessed over HTTPS
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF

#Manage kube-apiserver with systemd
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
root@k8s-master: ~/k8s ::
$
(4) Move the apiserver self-signed certificates to /opt/kubernetes/ssl
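The copy command itself is not shown in the transcript; assuming the certificates were generated in ~/k8s/k8s-cert as above, it would look something like:

cp ~/k8s/k8s-cert/{ca,ca-key,server,server-key}.pem /opt/kubernetes/ssl/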
root@k8s-master: /opt/kubernetes/ssl ::
$ pwd
/opt/kubernetes/ssl
root@k8s-master: /opt/kubernetes/ssl ::
$ ls
ca-key.pem ca.pem server-key.pem server.pem
root@k8s-master: /opt/kubernetes/ssl ::
$
(5) With the apiserver self-signed certificates copied into /opt/kubernetes/ssl, run the script
root@k8s-master: ~/k8s ::
$ ./apiserver.sh 192.168.1.63 https://192.168.1.63:2379,https://192.168.1.65:2379,https://192.168.1.66:2379
(6) Verify that kube-apiserver started successfully
root@k8s-master: /opt/kubernetes/ssl ::
$ netstat -lntup | grep 8080
tcp    127.0.0.1:8080    0.0.0.0:*    LISTEN    /kube-apiserver
root@k8s-master: /opt/kubernetes/ssl ::
root@k8s-master: /opt/kubernetes/ssl ::
$ ps -ef|grep kube
root : ? :: /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v= --etcd-servers=https://192.168.1.63:2379,https://192.168.1.65:2379,https://192.168.1.66:2379 --bind-address=192.168.1.63 --secure-port=6443 --advertise-address=192.168.1.63 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root : ? :: /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v= --master=127.0.0.1: --leader-elect
root : ? :: /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v= --master=127.0.0.1: --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.0.0.0/ --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root : pts/ :: grep --color=auto kube
root@k8s-master: /opt/kubernetes/ssl ::
$
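Besides netstat and ps, you can query the API directly; a minimal sketch using the addresses and certificate paths from this guide (depending on your RBAC/anonymous-auth settings the secure-port request may return 401/403, which still confirms TLS is being served on 6443):

# Local insecure port, the one controller-manager and scheduler use
curl http://127.0.0.1:8080/version
# Secure port, trusting the self-signed CA
curl --cacert /opt/kubernetes/ssl/ca.pem https://192.168.1.63:6443/version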
####Troubleshooting startup errors
$ source /opt/kubernetes/cfg/kube-apiserver
$ /opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS

==================================================kube-controller-manager==================================================
root@k8s-master: ~/k8s ::
$ cat controller-manager.sh
#!/bin/bash
#Master node IP address, passed in as the first argument
MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
#The apiserver on the master listens on port 8080; its address comes from the passed-in variable
--master=${MASTER_ADDRESS}:8080 \\
#Leader election, for automatic high availability
--leader-elect=true \\
#This component runs only on the master, so binding to localhost is enough to reach the apiserver
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
#CA used for signing (issuing) cluster certificates
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF

##Manage kube-controller-manager with systemd
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
root@k8s-master: ~/k8s ::
$

==================================================kube-scheduler==================================================
root@k8s-master: ~/k8s ::
$ cat scheduler.sh
#!/bin/bash

MASTER_ADDRESS=$1

##The scheduler config is only a few lines: logging, verbosity, the master address, and leader election
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

==================================================kubectl==================================================
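Before the kubectl checks below, it can be worth confirming that all three services actually came up in systemd; a quick sketch using the unit names created above:

for svc in kube-apiserver kube-controller-manager kube-scheduler; do
    echo -n "$svc: "; systemctl is-active $svc
done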
Verify the deployment
root@k8s-master: ~/k8s ::
$ cp /root/soft/kubernetes/server/bin/kubectl /usr/bin/
#Check the health status of the cluster components
$ kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
root@k8s-master: ~/k8s ::
$
###PS: "cs" is short for componentstatuses
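Note that kubectl here talks to the apiserver over the local insecure port, so the admin certificate generated earlier is not used yet. If you want kubectl to go through the secure port instead (for example from another machine), a sketch along these lines would build a kubeconfig from it; the file name admin.kubeconfig is my own choice, the server address and certificate paths follow this guide, and admin.pem/admin-key.pem are assumed to be in the current directory (~/k8s/k8s-cert):

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.63:6443 \
  --kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=admin --kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig

Because admin-csr.json sets O to system:masters, this certificate is granted cluster-admin by the default RBAC bindings.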
