Installing CoreDNS and Verifying the Cluster
Overview
So far, all of the cluster's core components have been installed. The cluster still needs the CoreDNS component to provide DNS inside the cluster.
Installation
CoreDNS runs as Pods inside the k8s cluster. Create the following yaml file:
[root@node01 work]# cd /opt/k8s/work/
[root@node01 work]# cat coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  # replicas is set to 2 here for redundancy; the upstream addon template leaves
  # it unspecified (default 1) so that the Addon Manager does not reconcile it
  # and so that DNS horizontal auto-scaling, if enabled, can tune it at runtime.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: host-time
          mountPath: /etc/localtime
          readOnly: true
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: host-time
          hostPath:
            path: /etc/localtime
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
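Note that the Service clusterIP 10.254.0.2 above is not arbitrary: it must match the clusterDNS address the kubelets on the worker nodes were configured with in the earlier chapters, otherwise Pods will not be pointed at CoreDNS. As a rough sketch only (it assumes the kubelets use a KubeletConfiguration file; adjust to however kubelet was actually configured in this series), the relevant fragment looks like:
# kubelet config fragment (illustrative, KubeletConfiguration v1beta1 fields)
clusterDNS:
- "10.254.0.2"
clusterDomain: "cluster.local"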
Apply the coredns.yaml file:
[root@node01 work]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
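If you want to wait for the Deployment to finish rolling out before checking the Pods, an optional extra step (not part of the original procedure) is:
kubectl -n kube-system rollout status deployment/coredns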
Check the result:
[root@node01 work]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-5c6c9cf6c8-gbhvd 1/1 Running 0 41s 172.30.160.2 node02 <none> <none>
coredns-5c6c9cf6c8-rtrc5 1/1 Running 0 41s 172.30.48.2 node04 <none> <none>
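It is also worth confirming that the kube-dns Service received the clusterIP defined in the manifest and that both Pods registered as endpoints (output omitted here; the Service should show 10.254.0.2 and the endpoints should list the two Pod IPs above):
kubectl -n kube-system get svc kube-dns
kubectl -n kube-system get endpoints kube-dns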
Test 1: NodePort Service
Create a test yaml file:
cd /opt/k8s/work
cat > nginx-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: daocloud.io/library/nginx:1.13.0-alpine
        ports:
        - containerPort: 80
EOF
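Note: the extensions/v1beta1 DaemonSet API used above works on the cluster built in this series (as the apply output below shows), but it was removed in Kubernetes 1.16. On a newer cluster an apps/v1 equivalent, which additionally requires a selector, would look roughly like this sketch:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: daocloud.io/library/nginx:1.13.0-alpine
        ports:
        - containerPort: 80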
Apply it:
[root@node01 work]# kubectl apply -f nginx-ds.yml
service/nginx-ds created
daemonset.extensions/nginx-ds created
[root@node01 work]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-4cdb6 1/1 Running 0 28s 172.30.160.3 node02 <none> <none>
nginx-ds-4l8pv 1/1 Running 0 28s 172.30.80.2 node03 <none> <none>
nginx-ds-jfz8l 1/1 Running 0 28s 172.30.48.3 node04 <none> <none>
nginx-ds-pmhw7 1/1 Running 0 28s 172.30.224.2 node01 <none> <none>
[root@node01 work]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 7h1m
nginx-ds NodePort 10.254.7.236 <none> 80:22415/TCP 33s
Test access via a node IP and the NodePort:
[root@node01 work]# curl -I 10.0.20.11:22415
HTTP/1.1 200 OK
Server: nginx/1.13.0
Date: Thu, 05 Dec 2019 13:31:18 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 10 May 2017 21:50:27 GMT
Connection: keep-alive
ETag: "59138b23-264"
Accept-Ranges: bytes
The NodePort Service is now reachable.
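Because a NodePort opens the same port on every node, the same check can be repeated against each node's InternalIP. A small optional sketch (it assumes kubectl access from the current shell and uses the NodePort 22415 shown above):
# print each node's InternalIP and the HTTP status returned on the NodePort
for ip in $(kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'); do
  echo "== ${ip} =="
  curl -sI "http://${ip}:22415" | head -n 1
done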
Test 2: CoreDNS
Create a busybox Pod and run DNS lookups from inside it:
cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28.3
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
[root@node01 work]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 18s
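Because the Pod uses the default dnsPolicy (ClusterFirst), its /etc/resolv.conf should point at the kube-dns clusterIP. An optional check (the nameserver line should read 10.254.0.2, assuming kubelet's clusterDNS matches the manifest):
kubectl exec -it busybox -- cat /etc/resolv.conf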
Test resolution of an in-cluster name:
[root@node01 work]# kubectl exec -it busybox -- nslookup kubernetes
Server: 10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local
Resolve the nginx Service created earlier:
[root@node01 work]# kubectl exec -it busybox -- nslookup nginx-ds.default.svc.cluster.local.
Server: 10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name: nginx-ds.default.svc.cluster.local.
Address 1: 10.254.7.236 nginx-ds.default.svc.cluster.local
[root@node01 work]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 7h5m
nginx-ds NodePort 10.254.7.236 <none> 80:22415/TCP 4m7s
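As a final optional check, external names should also resolve, since the Corefile forwards anything outside cluster.local to the node's /etc/resolv.conf (the domain below is only an illustration and the output is omitted):
kubectl exec -it busybox -- nslookup www.baidu.com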
At this point, the cluster installation is complete.