https://www.docker.elastic.co

注:目前阿里云为7.4

elasticsearch

参考
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html

拉取镜像
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.5.0

查看命令
docker inspect docker.elastic.co/elasticsearch/elasticsearch:7.5.0

mkdir /opt/elasticsearch -p
vim /opt/elasticsearch/Dockerfile

FROM docker.elastic.co/elasticsearch/elasticsearch:7.5.0
EXPOSE 9200
EXPOSE 9300

9200是http  9300是tcp

mkdir /opt/elasticsearch/usr/share/elasticsearch/data/ -p
cat /etc/passwd
data及其子目录(-R)赋予权限,否则无法写入数据
chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/data/

chown 1000:1000 -R /opt/elasticsearch/usr/share/elasticsearch/logs/

vim /opt/elasticsearch/docker-compose.yml

version: '2.2'
services:
  elasticsearch:
    image: v-elasticsearch
    restart: always
    container_name: elasticsearch
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - cluster.name=docker-cluster
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - network.host=0.0.0.0
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
      - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs

也可以用先写好配置文件的方式

version: '2.2'
services:
  elasticsearch:
    image: v-elasticsearch
    restart: always
    container_name: elasticsearch
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      - ES_JAVA_OPTS=-Xms256m -Xmx256m
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9200"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/elasticsearch/data/:/usr/share/elasticsearch/data
      - ./usr/share/elasticsearch/logs:/usr/share/elasticsearch/logs
      - ./usr/share/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

配置文件参考:
https://github.com/elastic/elasticsearch-docker/tree/master/.tedi/template
https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html

mkdir -p /opt/elasticsearch/usr/share/elasticsearch/config/
vim /opt/elasticsearch/usr/share/elasticsearch/config/elasticsearch.yml

---
discovery.type: single-node
bootstrap.memory_lock: true
cluster.name: docker-cluster
network.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"

ES的密码管理是用x-pack来实现的
默认账户为elastic,默认密码为changeme
这里省略掉x-pack

cd /opt/elasticsearch
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" elasticsearch

检查状态
curl http://127.0.0.1:9200/_cat/health

netstat -anltp|grep 9200

进入容器
docker exec -it elasticsearch /bin/bash

检查容器
docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --help
docker exec -it elasticsearch /bin/bash /usr/share/elasticsearch/bin/elasticsearch --version

复制配置文件
docker cp elasticsearch:/usr/share/elasticsearch/config/elasticsearch.yml /opt/elasticsearch/elasticsearch_bak.yml

-----------------------------------

elasticsearch-head

mkdir /opt/elasticsearch-head -p
vim /opt/elasticsearch-head/Dockerfile

FROM mobz/elasticsearch-head:5
EXPOSE 9100

vim /opt/elasticsearch-head/docker-compose.yml

version: '2.2'
services:
  elasticsearch-head:
    image: v-elasticsearch-head
    restart: always
    container_name: elasticsearch-head
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9100:9100"
    environment:
      TZ: 'Asia/Shanghai'

cd /opt/elasticsearch-head
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" elasticsearch-head

netstat -anltp|grep 9100

进入容器
docker exec -it elasticsearch-head /bin/bash

检查容器
docker exec -it elasticsearch-head /bin/bash /usr/share/elasticsearch-head/bin/elasticsearch-head --help
docker exec -it elasticsearch-head /bin/bash /usr/share/elasticsearch-head/bin/elasticsearch-head --version

导出配置文件
docker cp elasticsearch-head:/usr/src/app/Gruntfile.js /opt/elasticsearch-head/Gruntfile.js

mkdir /opt/elasticsearch-head/_site
docker cp elasticsearch-head:/usr/src/app/_site/app.js /opt/elasticsearch-head/_site/app.js

docker-compose加入

    volumes:
      - ./Gruntfile.js:/usr/src/app/Gruntfile.js
      - ./_site/app.js:/usr/src/app/_site/app.js

chown 1000:1000 -R /opt/elasticsearch-head/

重新生成创建启动

Gruntfile.js加入hostname: '*'

connect: {
    server: {
        options: {
            hostname: '0.0.0.0',
            port: 9100,
            base: '.',
            keepalive: true
        }
    }
}

http://192.168.0.101:9100/?auth_user=elastic&auth_password=123456

------------------------------------

kibana

https://www.elastic.co/guide/en/kibana/current/docker.html

docker pull docker.elastic.co/kibana/kibana:7.5.0

docker inspect docker.elastic.co/kibana/kibana:7.5.0

mkdir /opt/kibana -p
vim /opt/kibana/Dockerfile

FROM docker.elastic.co/kibana/kibana:7.5.0
EXPOSE 5601

vim /opt/kibana/docker-compose.yml

version: '2.2'
services:
  kibana:
    image: v-kibana
    restart: always
    container_name: kibana
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - SERVER_NAME=kibana
      - SERVER_HOST=0.0.0.0
      - ELASTICSEARCH_HOSTS=http://elasticsearchserver:9200
      - KIBANA_DEFAULTAPPID=discover
      - I18N_LOCALE=zh-CN
      - XPACK_MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED=true
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=123456
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
      retries: 300
      interval: 1s
    ports:
      - "5601:5601"
    volumes:
      - /var/log/kibana:/var/log/kibana
      - /var/lib/kibana:/var/lib/kibana
    extra_hosts:
      - "elasticsearchserver:192.168.0.101"

无法设置path.data

也可以用先写好配置文件的方式

vim /opt/kibana/docker-compose.yml

version: '2.2'
services:
  kibana:
    image: v-kibana
    restart: always
    container_name: kibana
    build:
      context: .
      dockerfile: Dockerfile
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:5601"]
      retries: 300
      interval: 1s
    ports:
      - "5601:5601"
    volumes:
      - ./usr/share/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
      - /var/lib/kibana:/var/lib/kibana
    extra_hosts:
      - "elasticsearchserver:192.168.0.101"

mkdir -p /opt/kibana/usr/share/kibana/config/
vim /opt/kibana/usr/share/kibana/config/kibana.yml

server.name: kibana
server.host: "0.0.0.0"
path.data: /var/lib/kibana
elasticsearch.hosts: [ "http://elasticsearchserver:9200" ]
kibana.defaultAppId: discover
i18n.locale: zh-CN
xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "elastic"
elasticsearch.password: "123456"

带x-pack

elasticsearch.username: "elastic"
elasticsearch.password: "123456"

mkdir -p /var/lib/kibana;chmod -R 777 /var/lib/kibana

cd /opt/kibana
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" kibana

检查状态
curl http://192.168.1.101:5601/api/status

netstat -anltp|grep 5601

进入容器
docker exec -it kibana /bin/bash

检查容器
docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --help
docker exec -it kibana /bin/bash /usr/share/kibana/bin/kibana --version

复制配置文件
docker cp kibana:/usr/share/kibana/config/kibana.yml /opt/kibana/kibana_bak.yml

------------------------

logstash

参考
https://www.elastic.co/guide/en/logstash/current/docker.html
https://www.elastic.co/guide/en/logstash/current/docker-config.html

docker pull docker.elastic.co/logstash/logstash:7.5.0

docker inspect docker.elastic.co/logstash/logstash:7.5.0

mkdir /opt/logstash -p
vim /opt/logstash/Dockerfile

jdk8

FROM openjdk:8 AS jdk
FROM docker.elastic.co/logstash/logstash:7.5.0
COPY --from=jdk /usr/local/openjdk-8 /usr/local
ENV JAVA_HOME=/usr/local/openjdk-8
ENV PATH=$JAVA_HOME/bin:$PATH
ENV CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
EXPOSE 9600
EXPOSE 9601
EXPOSE 9602
EXPOSE 5044

内置jdk会抛警告

FROM docker.elastic.co/logstash/logstash:7.5.0
EXPOSE 9600
EXPOSE 5044

vim /opt/logstash/docker-compose.yml

version: '2.2'
services:
  logstash:
    image: v-logstash
    restart: always
    container_name: logstash
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - HTTP_HOST=0.0.0.0
      - PATH_DATA=/var/lib/logstash/data
      - PATH_LOGS=/var/log/logstash
      - XPACK_MONITORING_ELASTICSEARCH_HOSTS=http://elasticsearchserver:9200
      - XPACK_MONITORING_ENABLED=true
      - XPACK_MONITORING_ELASTICSEARCH_USERNAME=elastic
      - XPACK_MONITORING_ELASTICSEARCH_PASSWORD=123456
    ports:
      - "9600:9600"
      - "5044:5044"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
      retries: 300
      interval: 1s
    volumes:
      - /var/log/logstash:/var/log/logstash
      - /var/lib/logstash/data:/var/lib/logstash/data
      - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
      - /var/lib/logstash/data1:/var/lib/logstash/data1
      - ./usr/share/logstash/pipeline/logstash2.conf:/usr/share/logstash/pipeline/logstash2.conf
      - /var/lib/logstash/data2:/var/lib/logstash/data2
    extra_hosts:
      - "elasticsearchserver:192.168.0.101"
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
    command: /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2

也可以用先写好配置文件的方式

vim /opt/logstash/docker-compose.yml

version: '2.2'
services:
  logstash:
    image: v-logstash
    restart: always
    container_name: logstash
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "9600:9600"
      - "5044:5044"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:9600/_node/stats"]
      retries: 300
      interval: 1s
    volumes:
      - ./usr/share/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /var/log/logstash:/var/log/logstash
      - /var/lib/logstash/data:/var/lib/logstash/data
      - ./usr/share/logstash/pipeline/logstash1.conf:/usr/share/logstash/pipeline/logstash1.conf
      - /var/lib/logstash/data1:/var/lib/logstash/data1
      - ./usr/share/logstash/pipeline/logstash2.conf:/usr/share/logstash/pipeline/logstash2.conf
      - /var/lib/logstash/data2:/var/lib/logstash/data2
    extra_hosts:
      - "elasticsearchserver:192.168.0.101"
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
    command: /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2

mkdir -p /var/log/logstash;chmod -R 777 /var/log/logstash
mkdir -p /var/lib/logstash/data;chmod -R 777 /var/lib/logstash/data
mkdir -p /var/lib/logstash/data1;chmod -R 777 /var/lib/logstash/data1
mkdir -p /var/lib/logstash/data2;chmod -R 777 /var/lib/logstash/data2

mkdir -p /opt/logstash/usr/share/logstash/config/
vim /opt/logstash/usr/share/logstash/config/logstash.yml

http.host: 0.0.0.0
path.data: /var/lib/logstash/data
path.logs: /var/log/logstash
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearchserver:9200" ]
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "123456"

带x-pack

xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "123456"

mkdir /opt/logstash/usr/share/logstash/pipeline/ -p

vim /opt/logstash/usr/share/logstash/pipeline/logstash1.conf

input {
  stdin {
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearchserver:9200"]
  }
}

input {
  stdin {
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearchserver:9200"]
    user => "elastic"
    password => "changeme"
  }
}

input{stdin{}}output{stdout{codec=>rubydebug}}
input{stdin{}}output{stdout{codec=>json_lines}}

input {
  beats {
    port => 5044
  }
}
output {
  stdout {
    codec => rubydebug
  }
}

##host:port是logback中appender中的 destination

logback->logstash->es

input {
  tcp {
    port => "9601"
    mode => "server"
    tags => "tags_test"
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => "elasticsearchserver:9200"
    index => "log-demo1"
    user => "elastic"
    password => "123456"
  }
}

tags的值自行设置

如果json文件比较长,需要换行的话,建议用codec=>"json_lines"插件

logback->kafka->logstash->es

input {
  kafka {
    topics => "log"
    bootstrap_servers => "kafkaserver1:9011,kafkaserver2:9012,kafkaserver3:9013"
    group_id => "log_1"
    auto_offset_reset => "earliest"
    consumer_threads => ""
    decorate_events => "false"
    type => "spring-boot-log"
    tags => "tags_test"
    codec => "json"
  }
}
filter {
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => "elasticsearchserver:9200"
    index => "log-kafka-demo1"
    user => "elastic"
    password => ""
  }
}

若logstash集群同一个groupid ,不会出现logstash重复消费kafka集群的问题

如果logback的kafka输出pattern不是json格式,logstash需设置为codec => "plain"

如果logback的kafka输出pattern为json格式,logstash需设置为codec => "json",不能是"json_lines"

pattern如:

<appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>{"contextName ":"%contextName ","relative":"%relative","time":"%date{yyyy-MM-dd HH:mm:ss.SSS}","file":"%file","class":"%class","method":"%method","contextName":"%contextName","line":"%line","logger_name":"%logger","thread_name":"%thread","level":"%-5level","host":"%host","hostName":"%hostName","port":"${server.port}","message":"%message","stack_trace":"%xException{5}"}</pattern>
</pattern>
</providers>
</encoder>
<!-- <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">-->
<!-- <pattern>{"contextName ":"%contextName ","relative":"%relative","time":"%date{yyyy-MM-dd HH:mm:ss.SSS}","file":"%file","class":"%class","method":"%method","contextName":"%contextName","line":"%line","logger_name":"%logger","thread_name":"%thread","level":"%-5level","host":"%host","hostName":"%hostName","port":"${server.port}","message":"%message","stack_trace":"%xException{5}"}</pattern>-->
<!-- <charset>UTF-8</charset>-->
<!-- </encoder>-->
<topic>log</topic>
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
<producerConfig>bootstrap.servers=${Kafka_Log_Servers}</producerConfig>
</appender>

所有key带s的参数不要用["xxx","yyy"] 的格式,应该用"xxx,yyy"的格式

cd /opt/logstash
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" logstash
docker logs -f logstash

进入容器
docker exec -it logstash /bin/bash

检查容器
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --help
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --version

netstat -anltp|grep 9600
netstat -anltp|grep 5044

复制配置文件
docker cp logstash:/usr/share/logstash/config/logstash.yml /opt/logstash/logstash_bak.yml
docker cp logstash:/usr/share/logstash/pipeline/logstash1.conf /opt/logstash/logstash1_bak.conf

测试
数据放data1以另一个实例运行
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{stdout{codec=>json_lines}}'
docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash --path.data=./data1 -e 'input{stdin{}}output{elasticsearch{hosts=>"192.168.1.101:9200"}}'

docker exec -it logstash /bin/bash /usr/share/logstash/bin/logstash -f  /usr/share/logstash/pipeline/logstash2.conf --path.data=/usr/share/logstash/data2

----------------------------
Filebeat

docker pull docker.elastic.co/beats/filebeat:7.5.0

docker inspect docker.elastic.co/beats/filebeat:7.5.0

参考
https://www.elastic.co/guide/en/beats/filebeat/current/running-on-docker.html

mkdir /opt/filebeat -p
vim /opt/filebeat/Dockerfile

FROM docker.elastic.co/beats/filebeat:7.5.0

vim /opt/filebeat/docker-compose.yml

version: '2.2'
services:
  filebeat:
    image: v-filebeat
    restart: always
    container_name: filebeat
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - ./usr/share/filebeat/data/:/usr/share/filebeat/data/
      - ./usr/share/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml

mkdir /opt/filebeat/usr/share/filebeat -p
vim /opt/filebeat/usr/share/filebeat/filebeat.yml

filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
processors:
  - add_cloud_metadata: ~
output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:192.168.1.101:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'

output.elasticsearch:
  hosts: ["192.168.1.101:9200"]

output.logstash:
  hosts: ["192.168.1.101:5044"]

cd /opt/filebeat
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" filebeat
docker logs -f filebeat

进入容器
docker exec -it filebeat /bin/bash

检查容器
docker exec -it filebeat /bin/bash /usr/share/filebeat/bin/filebeat --help
docker exec -it filebeat /bin/bash /usr/share/filebeat/bin/filebeat --version

复制配置文件
docker cp filebeat:/usr/share/filebeat/filebeat.yml /opt/filebeat/filebeat_bak.yml

---------------------------------------

apm-server

docker pull docker.elastic.co/apm/apm-server:7.5.0

docker inspect docker.elastic.co/apm/apm-server:7.5.0

参考
https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html

mkdir /opt/apm-server -p
vim /opt/apm-server/Dockerfile

FROM docker.elastic.co/apm/apm-server:7.5.0
EXPOSE 8200

docker-compose参考
https://github.com/elastic/apm-server/blob/master/tests/docker-compose.yml

vim /opt/apm-server/docker-compose.yml

version: '2.2'
services:
  apm-server:
    image: v-apm-server
    restart: always
    container_name: apm-server
    build:
      context: .
      dockerfile: Dockerfile
    environment:
      - output.elasticsearch.hosts=['http://192.168.1.101:9200']
      - output.elasticsearch.username=elastic
      - output.elasticsearch.password=
      - apm-server.host=0.0.0.0:8200
      - setup.kibana.host=192.168.1.101:5601
    ports:
      - "8200:8200"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:8200/"]
    command: apm-server -e -d "*" -E apm-server.host="0.0.0.0:8200" -E apm-server.expvar.enabled=true -E output.elasticsearch.hosts=['http://192.168.1.101:9200']

command: apm-server -e -d "*" -E apm-server.host="0.0.0.0:8200" -E apm-server.expvar.enabled=true -E output.elasticsearch.hosts=['http://192.168.1.101:9200']

volumes:
- ./usr/share/apm-server/apm-server.yml:/usr/share/apm-server/apm-server.yml

volumes:
- ./usr/share/apm-server/data:/usr/share/apm-server/data:ro

cd /opt/apm-server
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart

查看日志
docker logs --tail="500" apm-server
docker logs -f apm-server

进入容器
docker exec -it apm-server /bin/bash

检查容器
docker exec -it apm-server /bin/bash /usr/share/apm-server/bin/apm-server --help
docker exec -it apm-server /bin/bash /usr/share/apm-server/bin/apm-server --version

复制配置文件
docker cp apm-server:/usr/share/apm-server/apm-server.yml /opt/apm-server/apm-server_bak.yml

elastic stack安装运行(docker)的更多相关文章

  1. Elastic Stack 安装

    Elastic Stack 是一套支持数据采集.存储.分析.并可视化全面的分析工具,简称 ELK(Elasticsearch,Logstash,Kibana)的缩写. 安装Elastic Stack ...

  2. Docker——MacOS上安装运行docker

    近几年来,Docker越来越流行,使用场景也越来越广泛.为了能尽快跟上时代步伐.学习并应用到实际工作中,我也开始了Docker之旅. Docker版本 Docker共有两种版本: 社区版(Commun ...

  3. TICK/TIGK运维栈安装运行 docker【中】

    InfluxDB docker search influxdb docker pull influxdb docker run -d -p 8086:8086 -v /var/lib/influxdb ...

  4. Centos7基于容器安装运行Docker私有仓库及添加认证

    一.前言 官方的Docker hub是一个用于管理公共镜像的好地方,我们可以在上面找到我们想要的镜像,也可以把我们自己的镜像推送上去.但是,有时候,我们的使用场景需要我们拥有一个私有的镜像仓库用于管理 ...

  5. kafka安装运行(docker)

    zookeeper参照https://www.cnblogs.com/wintersoft/p/11128484.html mkdir /opt/kafka -pvim /opt/kafka/Dock ...

  6. skywalking安装运行(docker)

    https://github.com/apache/skywalking-docker/tree/master/6/6.5 https://hub.docker.com/r/apache/skywal ...

  7. zookeeper安装运行(docker)

    拉取镜像docker pull zookeeper:latest 获取镜像基本信息docker inspect zookeeper mkdir /opt/zookeeper -p vim /opt/z ...

  8. 手摸手 Elastic Stack 使用教程 - 环境安装

    前言 在日常的开发中,会涉及到对一些中间件以及基础组件的使用,日志汇总分析.APM 监控.接口的健康检查.站内搜索,以及对于服务器.nginx.iis 等等的监控,最近的几个需求点,都和 Elasti ...

  9. Spring Boot + Elastic stack 记录日志

    原文链接:https://piotrminkowski.wordpress.com/2019/05/07/logging-with-spring-boot-and-elastic-stack/ 作者: ...

随机推荐

  1. 纯css实现省略号,兼容火狐,IE9,chrome

    <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8&quo ...

  2. Appscan漏洞之会话标识未更新

    本次针对 Appscan漏洞 会话标识未更新 进行总结,如下: 1. 会话标识未更新 1.1.攻击原理 在认证用户或者以其他方式建立新用户会话时,如果不使任何现有会话标识失效,攻击者就有机会窃取已认证 ...

  3. insmod/rmmod

    insmod -f 不检查目前kernel版本与模块编译时的kernel版本是否一致,强制将模块载入 -k 将模块设置为自动卸除 -m 输出模块的载入信息 -o <模块名称> 指定模块的名 ...

  4. 为什么说pt-osc可能会引起主从延迟,有什么好办法解决或规避吗?

    若复制中binlog使用row格式,对大表使用pt-osc把数据从旧表拷贝到临时表,期间会产生大量的binlog,从而导致延时 pt-osc在搬数据过程中insert...select是有行锁的,会降 ...

  5. matplotlib绘图难题解决

    # coding=utf-8 import pandas as pd import yagmail import requests import arrow import numpy as np im ...

  6. linux虚拟串口及远程访问

    1. 虚拟终端概念 linux中有很多终端,如下简单介绍下各种终端或串口的概念. 1.1 tty:终端设备的统称 tty是Teletype或TeletypeWriter的缩写,中文翻译为电传打字机.电 ...

  7. 实验3 SQL注入原理-万能密码注入

    实验目的 (1)理解[万能密码]的原理 (2)学习[万能密码]的使用 实验原理 一.访问目标网站 1.选择一个存在漏洞的论坛 http://192.168.1.3:8009 进入 2.输入用户名[ad ...

  8. linux中container_of

    linux 驱动程序中 container_of宏解析 众所周知,linux内核的主要开发语言是C,但是现在内核的框架使用了非常多的面向对象的思想,这就面临了一个用C语言来实现面向对象编程的问题,今天 ...

  9. Python入门篇-文件操作

    Python入门篇-文件操作 作者:尹正杰 版权声明:原创作品,谢绝转载!否则将追究法律责任. 一.文件IO常用操作 open:打开 read:读取 write:写入 close:关闭 readlin ...

  10. 【原创】STM32低功耗模式及中断唤醒(基于BMI160及RTC)的研究

    预研目标 六轴静止时,终端进入低功耗模式:六轴震动时,终端正常工作模式,从而极大减少非工作时的电流消耗. 解决方案 机器静止时,依据六轴算法,CPU进入休眠(停止)模式:机器工作时,触发六轴中断唤醒C ...