etcd is a critical component of a Kubernetes cluster: it stores all of the cluster's network configuration and object state, and provides distributed coordination. All persistent state in Kubernetes is kept in etcd as key-value pairs; the reason the Kubernetes components themselves can be called stateless is precisely that their data lives in etcd.
Since etcd supports clustering, in this lab we deploy etcd on all three hosts.
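
To make the key-value model concrete: once the cluster is running, reading and writing a key with etcdctl's v2 API (the default for the etcdctl 3.2 client installed in section IV) looks roughly like this. /message is just an illustrative key, and against this TLS-secured cluster the same --endpoints/--ca-file/--cert-file/--key-file flags used in section IV would also be required:

etcdctl set /message "hello"   # write a key
etcdctl get /message           # read it back, prints: hello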

I. Create the etcd configuration file

Port 2379 is used for client (external) communication, and port 2380 for peer (internal) communication.
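
These steps assume no host firewall is in the way; if one such as ufw is enabled (an assumption, not covered in this walkthrough), both ports would need opening on every node:

sudo ufw allow 2379/tcp   # client traffic
sudo ufw allow 2380/tcp   # peer traffic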

This continues from the previous post: https://www.cnblogs.com/yangzp/p/15692046.html

On master:

yang@master:/opt/kubernetes/$ sudo mkdir -p /opt/kubernetes/cfg
yang@master:/opt/kubernetes/cfg$ sudo nano /opt/kubernetes/cfg/etcd.conf
#[member]
## etcd node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node1"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#
## peer URL etcd listens on; change this on every node
ETCD_LISTEN_PEER_URLS="https://192.168.1.106:2380"
# client URL etcd listens on; change this on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.106:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.106:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.1.106:2380,etcd-node2=https://192.168.1.108:2380,etcd-node3=https://192.168.1.109:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.106:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

On node1:

yang@node1:/opt/kubernetes/$ sudo mkdir -p /opt/kubernetes/cfg
yang@node1:/opt/kubernetes/cfg$ sudo nano /opt/kubernetes/cfg/etcd.conf
#[member]
## etcd node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node2"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
## peer URL etcd listens on; change this on every node
ETCD_LISTEN_PEER_URLS="https://192.168.1.108:2380"
# client URL etcd listens on; change this on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.108:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.108:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# cluster member list
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.1.106:2380,etcd-node2=https://192.168.1.108:2380,etcd-node3=https://192.168.1.109:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.108:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

On node2:

yang@node2:/opt/kubernetes/$ sudo mkdir -p /opt/kubernetes/cfg
yang@node2:/opt/kubernetes/cfg$ cat /opt/kubernetes/cfg/etcd.conf
#[member]
## etcd node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node3"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
## peer URL etcd listens on; change this on every node
ETCD_LISTEN_PEER_URLS="https://192.168.1.109:2380"
# client URL etcd listens on; change this on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.109:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.109:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# cluster member list
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.1.106:2380,etcd-node2=https://192.168.1.108:2380,etcd-node3=https://192.168.1.109:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.109:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
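
The three files above differ only in ETCD_NAME and the node's IP address. As an optional shortcut (my own sketch, not one of the original steps), the file could be generated on each host from two variables; NODE_NAME and NODE_IP are placeholders set per node:

NODE_NAME=etcd-node1    # etcd-node2 / etcd-node3 on the other hosts
NODE_IP=192.168.1.106   # 192.168.1.108 / 192.168.1.109 on the other hosts
sudo tee /opt/kubernetes/cfg/etcd.conf <<EOF
#[member]
ETCD_NAME="${NODE_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${NODE_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${NODE_IP}:2379,https://127.0.0.1:2379"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${NODE_IP}:2380"
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.1.106:2380,etcd-node2=https://192.168.1.108:2380,etcd-node3=https://192.168.1.109:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://${NODE_IP}:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
EOF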

II. Create the etcd systemd service

On master:

yang@master:/opt/kubernetes/cfg$ sudo nano /etc/systemd/system/etcd.service 

[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target

On node1:

yang@node1:/opt/kubernetes/cfg$ sudo nano /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target

On node2:

yang@node2:/opt/kubernetes/cfg$ sudo nano /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target
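
Since the unit file is identical on all three nodes, it could also be written once on master and copied out over SSH (a convenience sketch assuming the yang account can SSH between the hosts; not one of the original steps):

scp /etc/systemd/system/etcd.service yang@192.168.1.108:/tmp/etcd.service
ssh yang@192.168.1.108 'sudo mv /tmp/etcd.service /etc/systemd/system/etcd.service'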

III. Reload systemd and start etcd

1. Reload systemd and enable etcd to start on boot on all three servers (master, node1, and node2; only master is shown below, so run the same commands on the other two):

yang@master:/opt/kubernetes/cfg$ sudo systemctl daemon-reload
yang@master:/opt/kubernetes/cfg$ sudo systemctl enable etcd

2. etcd's data directory is not created automatically, so create it on all three nodes and then start etcd:

yang@master:/opt/kubernetes/cfg$ sudo mkdir /var/lib/etcd
yang@master:/opt/kubernetes/cfg$ sudo systemctl start etcd
yang@master:/opt/kubernetes/cfg$ sudo systemctl status etcd

yang@node1:/opt/kubernetes/cfg$ sudo mkdir /var/lib/etcd
yang@node1:/opt/kubernetes/cfg$ sudo systemctl start etcd
yang@node1:/opt/kubernetes/cfg$ sudo systemctl status etcd

yang@node2:/opt/kubernetes/cfg$ sudo mkdir /var/lib/etcd
yang@node2:/opt/kubernetes/cfg$ sudo systemctl start etcd
yang@node2:/opt/kubernetes/cfg$ sudo systemctl status etcd
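
Note that with a new cluster the first systemctl start may appear to hang: the service only reports ready once the members can reach each other, so start etcd on all three nodes in quick succession. If a node fails to come up, the journal usually explains why (bad certificate paths, peer connectivity, and so on); a quick way to check:

sudo journalctl -u etcd --no-pager -n 50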

3. Check that ports 2379 and 2380 are listening on all three servers:

On master:

yang@master:/opt/kubernetes/cfg$ sudo apt install net-tools
yang@master:/opt/kubernetes/cfg$ netstat -antpl
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 192.168.1.106:2379 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.106:2380 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN -
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.106:53248 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 192.168.1.106:53250 192.168.1.108:2380 ESTABLISHED -
tcp 0 36 192.168.1.106:22 192.168.1.114:64179 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.108:46790 ESTABLISHED -
tcp 0 0 127.0.0.1:38788 127.0.0.1:2379 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.109:36012 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.108:46778 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.108:46780 ESTABLISHED -
tcp 0 0 192.168.1.106:53260 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.109:36008 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.109:36000 ESTABLISHED -
tcp 0 0 192.168.1.106:33486 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 127.0.0.1:2379 127.0.0.1:38788 ESTABLISHED -
tcp 0 0 192.168.1.106:2379 192.168.1.106:39984 ESTABLISHED -
tcp 0 0 192.168.1.106:33476 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 192.168.1.106:2380 192.168.1.109:35998 ESTABLISHED -
tcp 0 0 192.168.1.106:33478 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 192.168.1.106:39984 192.168.1.106:2379 ESTABLISHED -
tcp6 0 0 :::22 :::* LISTEN -
tcp6 0 0 ::1:6010 :::* LISTEN -

On node1:

yang@node1:/opt/kubernetes/cfg$ netstat -antpl
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.108:2379 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.108:2380 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN -
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.108:44020 192.168.1.108:2379 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.109:60836 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.106:53260 ESTABLISHED -
tcp 0 0 192.168.1.108:2379 192.168.1.108:44020 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.106:53248 ESTABLISHED -
tcp 0 0 192.168.1.108:53444 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:53452 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:46778 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:46780 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:46790 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.109:60842 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.109:60848 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.106:53250 ESTABLISHED -
tcp 0 0 192.168.1.108:53442 192.168.1.109:2380 ESTABLISHED -
tcp 0 0 192.168.1.108:2380 192.168.1.109:60856 ESTABLISHED -
tcp 0 0 127.0.0.1:60782 127.0.0.1:2379 ESTABLISHED -
tcp 0 36 192.168.1.108:22 192.168.1.114:64184 ESTABLISHED -
tcp 0 0 127.0.0.1:2379 127.0.0.1:60782 ESTABLISHED -
tcp6 0 0 ::1:6010 :::* LISTEN -
tcp6 0 0 :::22 :::* LISTEN -

On node2:

yang@node2:/opt/kubernetes/cfg$ netstat -antpl
(Not all processes could be identified, non-owned process info
will not be shown, you would have to be root to see it all.)
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN -
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:6010 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.109:2379 0.0.0.0:* LISTEN -
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.109:2380 0.0.0.0:* LISTEN -
tcp 0 0 192.168.1.109:2380 192.168.1.106:33478 ESTABLISHED -
tcp 0 0 192.168.1.109:36008 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:2380 192.168.1.106:33476 ESTABLISHED -
tcp 0 0 192.168.1.109:36000 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:2380 192.168.1.106:33486 ESTABLISHED -
tcp 0 0 192.168.1.109:60842 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:2379 192.168.1.109:37924 ESTABLISHED -
tcp 0 0 192.168.1.109:36012 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:37924 192.168.1.109:2379 ESTABLISHED -
tcp 0 36 192.168.1.109:22 192.168.1.114:64185 ESTABLISHED -
tcp 0 0 127.0.0.1:2379 127.0.0.1:57926 ESTABLISHED -
tcp 0 0 192.168.1.109:60856 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:2380 192.168.1.108:53452 ESTABLISHED -
tcp 0 0 192.168.1.109:35998 192.168.1.106:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:60848 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 192.168.1.109:60836 192.168.1.108:2380 ESTABLISHED -
tcp 0 0 127.0.0.1:57926 127.0.0.1:2379 ESTABLISHED -
tcp 0 0 192.168.1.109:2380 192.168.1.108:53442 ESTABLISHED -
tcp 0 0 192.168.1.109:2380 192.168.1.108:53444 ESTABLISHED -
tcp6 0 0 :::22 :::* LISTEN -
tcp6 0 0 ::1:6010 :::* LISTEN -
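
On systems where installing net-tools is undesirable, the preinstalled ss gives an equivalent view; filtering for just the etcd ports would look like this (an alternative check, not from the original post):

sudo ss -tlnp | grep -E ':2379|:2380'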

IV. Verify the etcd cluster

On master:

yang@master:/opt/kubernetes/bin$ etcdctl --endpoints=https://192.168.1.106:2379 --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/etcd.pem  --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health

Command 'etcdctl' not found, but can be installed with:

sudo apt install etcd-client

yang@master:/opt/kubernetes/bin$ sudo apt install etcd-client
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following NEW packages will be installed:
etcd-client
0 upgraded, 1 newly installed, 0 to remove and 44 not upgraded.
Need to get 4,563 kB of archives.
After this operation, 17.2 MB of additional disk space will be used.
Get:1 http://mirrors.aliyun.com/ubuntu focal/universe amd64 etcd-client amd64 3.2.26+dfsg-6 [4,563 kB]
Fetched 4,563 kB in 2s (2,398 kB/s)
Selecting previously unselected package etcd-client.
(Reading database ... 71849 files and directories currently installed.)
Preparing to unpack .../etcd-client_3.2.26+dfsg-6_amd64.deb ...
Unpacking etcd-client (3.2.26+dfsg-6) ...
Setting up etcd-client (3.2.26+dfsg-6) ...
Processing triggers for man-db (2.9.1-1) ...
yang@master:/opt/kubernetes/bin$ sudo etcdctl --endpoints=https://192.168.1.106:2379 --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/etcd.pem --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member 1baa0a58b574d69b is healthy: got healthy result from https://192.168.1.108:2379
member 39133e181b350a4e is healthy: got healthy result from https://192.168.1.106:2379
member 69fb6a35f1ce3d83 is healthy: got healthy result from https://192.168.1.109:2379
cluster is healthy
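
The cluster-health subcommand above uses etcd's v2 API. etcd 3.x also serves the v3 API, and the corresponding health check through it would look roughly like this (note the v3 flag names: --cacert/--cert/--key rather than --ca-file/--cert-file/--key-file):

sudo ETCDCTL_API=3 etcdctl --endpoints=https://192.168.1.106:2379 \
  --cacert=/opt/kubernetes/ssl/ca.pem \
  --cert=/opt/kubernetes/ssl/etcd.pem \
  --key=/opt/kubernetes/ssl/etcd-key.pem \
  endpoint health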

With that, the etcd deployment is complete!

Reference: https://blog.csdn.net/qq_34261373/article/details/90052220
