1 Filebeat:

apiVersion: v1
kind: Service
metadata:
  name: XX
spec:
  ports:
  - name: http
    port:
    targetPort: http
  selector:
    app: XX
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: XX
  labels:
    app: XX
spec:
  replicas:
  minReadySeconds:
  revisionHistoryLimit:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: %
      maxSurge: %
  selector:
    matchLabels:
      app: XX
  template:
    metadata:
      labels:
        app: XX
    spec:
      terminationGracePeriodSeconds:
      imagePullSecrets:
      - name: registry-key
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.2.
        args: [
          "-c", "/etc/filebeat/filebeat.yml",
        ]
        volumeMounts:
        - name: app-logs
          mountPath: /aaa/log
        - name: filebeat-config
          mountPath: /etc/filebeat/
      - name: forecast-user-profile
        image: your_application_imageURL
        volumeMounts:
        - name: app-logs
          mountPath: /var/log
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort:
        env:
        - name: DB_HOST
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_host
        - name: DB_PORT
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_port
        - name: DB_NAME
          valueFrom:
            configMapKeyRef:
              name: forecast-conf
              key: db_name
        - name: DB_USER
          valueFrom:
            secretKeyRef:
              name: db-auth
              key: username
        - name: DB_PWD
          valueFrom:
            secretKeyRef:
              name: db-auth
              key: password
      volumes:
      - name: app-logs
        emptyDir: {}
      - name: filebeat-config
        configMap:
          name: filebeat-config
      # lifecycle:
      #   preStop:
      #     exec:
      #       command: ["consul", "leave"]
      #   postStart:
      #     exec:
      #       command: ["consul", "entry"]
      # livenessProbe:
      # readinessProbe:
      # resources:
      # workingDir:
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
data:
  filebeat.yml: |
    filebeat.prospectors:
    - input_type: log
      paths:
      - "/aaa/log/*.log"
    output.elasticsearch:
      hosts: ["logstash-elasticsearch-service:9200"]
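Once the three manifests above are applied, it is worth checking that the filebeat sidecar actually sees the application logs. A minimal sketch, assuming the manifests are saved in a file such as filebeat-app.yaml (the filename is arbitrary) and the Deployment name XX is filled in:

kubectl create -f filebeat-app.yaml
kubectl get pods -l app=XX
# tail the sidecar; replace <podname> with a pod name listed above
kubectl logs -f <podname> -c filebeat

The application container and the filebeat container share the app-logs emptyDir, so files the application writes under /var/log appear under /aaa/log inside the filebeat container and match the "/aaa/log/*.log" prospector path.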

2 Elasticsearch:

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    elastic-app: elasticsearch
    role: data
  name: logstash-elasticsearch-deployment
spec:
  replicas:
  revisionHistoryLimit:
  selector:
    matchLabels:
      elastic-app: elasticsearch
  template:
    metadata:
      labels:
        elastic-app: elasticsearch
        role: data
    spec:
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:6.2.
        ports:
        - containerPort:
          protocol: TCP
        volumeMounts:
        - name: esdata
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: "ES_JAVA_OPTS"
          value: "-Xms256m -Xmx256m"
      volumes:
      - name: esdata
        emptyDir: {}
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true
---
kind: Service
apiVersion: v1
metadata:
  labels:
    elastic-app: elasticsearch-service
  name: logstash-elasticsearch-service
spec:
  ports:
  - port:
    targetPort:
  selector:
    elastic-app: elasticsearch
  type: NodePort
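Once the data pod is Running, a quick cluster health check confirms that Elasticsearch is reachable through the Service. A minimal sketch, assuming the blank port/targetPort above are filled in with 9200 (the Elasticsearch default); the throwaway pod name curl-test is only for illustration:

kubectl get pods -l elastic-app=elasticsearch
# query cluster health through the in-cluster service name
kubectl run curl-test --rm -it --restart=Never --image=alpine:3.6 -- \
  wget -q -O - "http://logstash-elasticsearch-service:9200/_cluster/health?pretty"

A status of green or yellow means the node is up; yellow is expected here because replica shards cannot be allocated with a single data node.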

3 Kibana:

---
kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    elastic-app: kibana
  name: kibana
spec:
  replicas:
  revisionHistoryLimit:
  selector:
    matchLabels:
      elastic-app: kibana
  template:
    metadata:
      labels:
        elastic-app: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:6.2.
        ports:
        - containerPort:
          protocol: TCP
        volumeMounts:
        - name: config-volume
          mountPath: /opt/kibana/config
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      volumes:
      - name: config-volume
        configMap:
          name: logging-configmap
          items:
          - key: kibana.yml
            path: kibana.yml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    elastic-app: kibana
  name: kibana-service
spec:
  ports:
  - port:
    targetPort:
  selector:
    elastic-app: kibana
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logging-configmap
  namespace: default
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
    ## Disable X-Pack
    ## see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
    xpack.monitoring.enabled: false
  logstash.conf: |
    # all input will come from filebeat, no local logs
    input {
      beats {
        port =>
      }
    }
    ## some more advanced filtering and tagging of incoming kubernetes logs is done in logstash
    filter {
      if [type] == "kube-logs" {
        mutate {
          rename => ["log", "message"]
          add_tag => [ "pelias", "kubernetes" ]
        }
        date {
          match => ["time", "ISO8601"]
          remove_field => ["time"]
        }
        # all standard container log files match a common pattern
        grok {
          match => { "source" => "%{DATA:pod_name}" }
          remove_field => ["source"]
        }
        # system services have a simpler log filename format that does not include namespace, pod names, or container ids
        grok {
          match => { "source" => "%{DATA:container_name}" }
          add_field => { "namespace" => "default" }
          remove_field => ["source"]
        }
      }
    }
    output {
      elasticsearch {
        hosts => [ "logstash-elasticsearch-service:9200" ]
      }
    }
  kibana.yml: |
    ## Default Kibana configuration from kibana-docker.
    ## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
    #
    #server.host: ""
    elasticsearch.url: http://logstash-elasticsearch-service:9200
    ## Disable X-Pack
    ## see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html
    ## https://www.elastic.co/guide/en/x-pack/current/installing-xpack.html#xpack-enabling
    #
    xpack.security.enabled: false
    xpack.monitoring.enabled: false
    xpack.ml.enabled: false
    xpack.graph.enabled: false
    xpack.reporting.enabled: false
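With Elasticsearch receiving data from Filebeat, Kibana only needs the kibana.yml above (mounted from logging-configmap) to point elasticsearch.url at the Elasticsearch service. Because the Service type is NodePort with no fixed nodePort given here, a port-forward is an easy way to reach the UI; a minimal sketch, assuming Kibana listens on its default port 5601:

kubectl get svc kibana-service                    # shows the NodePort assigned by the cluster
kubectl port-forward deployment/kibana 5601:5601
# then open http://localhost:5601 and create an index pattern such as filebeat-*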

4 Common commands:

kubectl delete -f xxxxxxxxx.yaml

kubectl create -f xxxxxxxxxx.yaml

kubectl get pods

kubectl get service

kubectl logs -f podname -c containername -n namespacename

kubectl exec -it podname -- sh

kubectl get pod podname -o yaml
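Putting it together, a typical bring-up order is Elasticsearch first, then Kibana, then the application with its filebeat sidecar, so each component finds its dependencies when it starts. A minimal sketch, assuming the three sections above are saved as separate files (the filenames are arbitrary):

kubectl create -f elasticsearch.yaml
kubectl create -f kibana.yaml
kubectl create -f filebeat-app.yaml
kubectl get pods            # wait until all pods are Running
kubectl get service         # note the NodePorts assigned to elasticsearch and kibana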
