注:centos8单机版

注:本次实验手动配置密码均为admin

环境准备:配置hosts文件

192.168.116.85为本机IP

# Map the controller/vip/myip names to this host's address (single-node lab).
printf '%s\n' '192.168.116.85 controller vip myip' >> /etc/hosts
# Bring all installed packages up to date.
yum -y upgrade

禁用防火墙 与selinux

# Stop firewalld now and keep it off after reboot.
systemctl --now disable firewalld
# Permanently disable SELinux (effective after the next reboot) ...
sed -i -e '/^SELINUX=/s/enforcing/disabled/' /etc/selinux/config
# ... and drop to permissive mode for the current session.
setenforce 0

注:网络组件与NetworkManager冲突,所以需要禁用NetworkManager,启用network服务

# Install the legacy network service (network-scripts)
dnf install network-scripts -y
# Stop NetworkManager and disable it at boot (it conflicts with neutron's agents)
systemctl stop NetworkManager && systemctl disable NetworkManager
# Start the network service and enable it at boot
systemctl start network && systemctl enable network
# If enabling the network service fails, run the following command
/usr/lib/systemd/systemd-sysv-install enable network

1. 更换yum源

wget http://mirrors.aliyun.com/repo/Centos-8.repo

2. 下载openstack源

# Install the OpenStack Ussuri release repository.
yum install  -y centos-release-openstack-ussuri

# CentOS 8 is EOL: comment out the dead mirrorlist and point baseurl at
# vault.centos.org; skip GPG checking for the vault packages.
sed -i 's/^mirrorlist=http:\/\/mirrorlist.centos.org/#mirrorlist=http:\/\/mirrorlist.centos.org/g' /etc/yum.repos.d/C*
sed -i 's/^#baseurl=http:\/\/mirror.centos.org/baseurl=https:\/\/vault.centos.org/g' /etc/yum.repos.d/C*
# BUG FIX: the sed and the config-manager call were fused onto one line, which
# made sed try to edit files named "yum", "config-manager", "powertools", ...
sed -i 's/gpgcheck=1/gpgcheck=0/g' /etc/yum.repos.d/C*
yum config-manager --set-enabled powertools
# OpenStack CLI client and the SELinux policies for OpenStack services.
yum install -y python3-openstackclient
yum install -y openstack-selinux

3. 本地数据库配置

  • bind-address=127.0.0.1 #只允许本机访问。
  • bind-address=某个网卡的ip #例如bind-address=192.168.116.85,只能通过ip为192.168.116.85的网卡访问。
  • bind-address=0.0.0.0 #此规则是系统默认配置,监听所有网卡,即允许所有ip访问。
# MariaDB server plus the Python MySQL driver used by the OpenStack services.
yum install -y mariadb mariadb-server python3-PyMySQL

# OpenStack-specific MariaDB settings. bind-address limits which NIC accepts
# connections (here: this host's IP, so other hosts on the subnet can reach it).
cat > /etc/my.cnf.d/openstack.cnf << EOF
[mysqld]
bind-address = 192.168.116.85
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF

# Start MariaDB now and enable it at boot.
systemctl enable mariadb --now

# Interactive hardening: sets the root password (the lab uses "admin"
# everywhere per the note at the top), removes anonymous users, etc.
mysql_secure_installation

4. 配置rabbitmq

  遇到报错:缺libSDL2,erlang安装失败,rabbitmq安装失败;

  尝试单独下载erlang源,再次安装,依然失败:curl -s https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh|sh;失败后删除本操作产生的额外erlang源

  解决:wget https://pkgs.dyn.su/el8/extras/x86_64/SDL2-2.0.14-5.el8.x86_64.rpm;yum -y install SDL2-2.0.14-5.el8.x86_64.rpm   或yum -y upgrade试试

# Message broker shared by all OpenStack services.
yum install -y rabbitmq-server
# BUG FIX: "systemctl enable ... --now" and "rabbitmqctl add_user" were fused
# onto one line in the original; they are two separate commands.
systemctl enable rabbitmq-server --now
# Create the "openstack" broker user (password "openstack") ...
rabbitmqctl add_user openstack openstack

# ... and grant it configure/write/read permission on every vhost resource.
rabbitmqctl set_permissions openstack ".*" ".*" ".*"

5. 配置memcached

注:192.168.116.85为本机IP

# memcached caches keystone tokens; python3-memcached is the client library.
yum install -y memcached python3-memcached

# Listen on loopback plus this host's IP so the OpenStack services can reach it.
sed -i 's/OPTIONS=".*"/OPTIONS="-l 127.0.0.1,::1,192.168.116.85"/' /etc/sysconfig/memcached

# Start memcached now and enable it at boot.
systemctl enable memcached --now

6. 配置etcd

单节点可不部署

yum install -y etcd

7. 配置keystone

报错:
openstack token issue
Failed to discover available identity versions when contacting http://vip:5000/v3. Attempting to parse version from URL.
Unexpected exception for http://vip:5000/v3/auth/tokens: Failed to parse: http://vip:5000/v3/auth/tokens

解决:
yum -y upgrade

  

# Create the keystone database and grant the keystone DB user access
# (root password Admin123! was set by mysql_secure_installation).
mysql -uroot -pAdmin123! -e'create database if not exists keystone;
grant all privileges on keystone.* to keystone@localhost identified by "keystone";
grant all privileges on keystone.* to keystone@"%" identified by "keystone";
flush privileges;'
# Identity service plus Apache and its WSGI module (keystone runs under httpd).
yum -y install openstack-keystone httpd python3-mod_wsgi
# Point keystone at its database and use fernet tokens.
sed -i -e '/^\[database\]/a connection \= mysql\+pymysql\:\/\/keystone:keystone\@vip\/keystone' -e '/^\[token\]/a provider \= fernet' /etc/keystone/keystone.conf
# Populate the keystone schema as the keystone system user.
su -s /bin/sh -c "keystone-manage db_sync" keystone

# Initialize fernet token and credential encryption keys.
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# Bootstrap the admin account (password "admin") and the three v3 endpoints.
keystone-manage bootstrap --bootstrap-password admin   --bootstrap-admin-url http://vip:5000/v3/   --bootstrap-internal-url http://vip:5000/v3/   --bootstrap-public-url http://vip:5000/v3/   --bootstrap-region-id RegionOne

# Give Apache an explicit ServerName and wire in the keystone vhost config.
sed -i '/^\#ServerName/i ServerName 192.168.116.85' /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl restart httpd
systemctl enable httpd --now
# Write the admin credentials file (openrc) used by the openstack CLI.
# The here-doc body contains no $-expansions, so the unquoted EOF is safe.
cat > openstack-admin.sh << EOF
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://vip:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
# BUG FIX: the original line read "EOF source openstack-admin.sh", so the
# here-doc delimiter never matched and the here-doc never terminated. The
# terminator must stand alone on its line; source the file afterwards.
source openstack-admin.sh

# Smoke-test the identity service, then create the standard projects/users.
openstack domain create --description "An Example Domain" example # test: create a domain
openstack token issue # verify token issuing works
openstack domain list # list domains
openstack domain set  example --disable # disable the test domain
openstack domain delete example # delete the test domain
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user

8. 配置glance

# Create the glance database and grant the glance DB user access.
mysql -uroot  -pAdmin123! -e'create database if not exists glance;
grant all privileges on glance.* to glance@localhost identified by "glance";
grant all privileges on glance.* to glance@"%" identified by "glance";
flush privileges;'
# Keystone identity for the image service (password entered interactively;
# the lab notes say all passwords are "admin").
openstack user create --domain default --password-prompt glance

openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
# Image API endpoints on port 9292.
openstack endpoint create --region RegionOne image public http://vip:9292
openstack endpoint create --region RegionOne image internal http://vip:9292
openstack endpoint create --region RegionOne image admin http://vip:9292
yum -y install openstack-glance

# /etc/glance/glance-api.conf: database, local-file image store, keystone auth.
sed -i '/^\[database\]/a connection = mysql\+pymysql\:\/\/glance:glance\@vip\/glance' /etc/glance/glance-api.conf
sed -i '/^\[glance_store\]/a stores = file,http \ndefault_store = file \nfilesystem_store_datadir = /var/lib/glance/images/' /etc/glance/glance-api.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000\nauth_url = http://vip:5000 \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = glance \npassword = admin' /etc/glance/glance-api.conf
sed -i '/^\[paste_deploy\]/a flavor = keystone' /etc/glance/glance-api.conf
# Populate the glance schema.
su -s /bin/sh -c "glance-manage db_sync" glance

systemctl enable openstack-glance-api.service --now

9. 配置placement

# Create the placement database and grant the placement DB user access.
mysql -uroot -pAdmin123! -e'create database placement;
grant all privileges on placement.* to placement@localhost identified by "placement";
grant all privileges on placement.* to placement@"%" identified by "placement";
flush privileges;'

# Keystone identity for the placement service (interactive password prompt).
openstack user create --domain default --password-prompt placement

openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
# Placement API endpoints on port 8778.
openstack endpoint create --region RegionOne placement public http://vip:8778
openstack endpoint create --region RegionOne placement internal http://vip:8778
openstack endpoint create --region RegionOne placement admin http://vip:8778
yum install -y openstack-placement-api

# /etc/placement/placement.conf: database and keystone auth.
sed -i '/^\[placement_database\]/a connection = mysql+pymysql://placement:placement@vip/placement' /etc/placement/placement.conf
sed -i '/^\[api\]/a auth_strategy = keystone' /etc/placement/placement.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000 \nauth_url = http://vip:5000/v3 \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = placement \npassword = admin' /etc/placement/placement.conf
# Populate the placement schema.
su -s /bin/sh -c "placement-manage db sync" placement
# Work around the packaged 00-placement-api.conf missing access rules for
# /usr/bin (httpd 403s the WSGI script without this).
sed -i  '/<\/VirtualHost>/i <Directory /usr/bin> \n   <IfVersion >= 2.4> \n      Require all granted \n   </IfVersion> \n   <IfVersion < 2.4> \n      Order allow,deny \n      Allow from all \n   </IfVersion> \n</Directory> ' /etc/httpd/conf.d/00-placement-api.conf

systemctl restart httpd

10. 配置nova

# Create the three nova databases (api, main, cell0) and grant access.
mysql -uroot -pAdmin123! -e"
create database nova_api;
create database nova;
create database nova_cell0;
grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_api.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova.* to 'nova'@'%' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'nova';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'nova';
flush privileges;"
# Keystone identity for the compute service (interactive password prompt).
openstack user create --domain default --password-prompt nova

openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
# Compute API endpoints on port 8774.
openstack endpoint create --region RegionOne compute public http://vip:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://vip:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://vip:8774/v2.1
yum install -y openstack-nova-api openstack-nova-conductor   openstack-nova-novncproxy openstack-nova-scheduler

# /etc/nova/nova.conf: enabled APIs, RabbitMQ transport, this host's IP,
# database connections, glance/placement/keystone endpoints, VNC.
sed -i '/^\[DEFAULT\]/a enabled_apis = osapi_compute,metadata \ntransport_url = rabbit://openstack:openstack@vip:5672/ \nmy_ip = 192.168.116.85' /etc/nova/nova.conf
sed -i '/^\[api\]/a auth_strategy = keystone' /etc/nova/nova.conf
sed -i '/^\[api_database\]/a connection = mysql+pymysql://nova:nova@vip/nova_api' /etc/nova/nova.conf
sed -i '/^\[database\]/a connection = mysql+pymysql://nova:nova@vip/nova' /etc/nova/nova.conf
sed -i '/^\[glance\]/a api_servers = http://vip:9292' /etc/nova/nova.conf
sed -i '/^\[keystone_authtoken\]/a www_authenticate_uri = http://vip:5000/ \nauth_url = http://vip:5000/ \nmemcached_servers = 192.168.116.85:11211 \nauth_type = password \nproject_domain_name = Default \nuser_domain_name = Default \nproject_name = service \nusername = nova \npassword = admin' /etc/nova/nova.conf
sed -i '/^\[oslo_concurrency\]/a lock_path = /var/lib/nova/tmp' /etc/nova/nova.conf
sed -i '/^\[placement\]/a region_name = RegionOne \nproject_domain_name = Default \nproject_name = service \nauth_type = password \nuser_domain_name = Default \nauth_url = http://vip:5000/v3 \nusername = placement \npassword = admin' /etc/nova/nova.conf
sed -i '/^\[vnc\]/a enabled = true \nserver_listen = $my_ip \nserver_proxyclient_address = $my_ip' /etc/nova/nova.conf
# Populate the API database, register cell0 and cell1, then the main database.
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

验证:su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# Enable at boot and (re)start all controller-side nova services.
systemctl enable openstack-nova-api openstack-nova-scheduler  openstack-nova-conductor openstack-nova-novncproxy
systemctl restart openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy

实验的时候发现了一个很好用的ini配置文件编辑工具:crudini

[libvirt]
#virt_type = kvm #物理机配置openstack
#virt_type = qemu #虚拟机配置openstack
####(官方:虚拟机必须配置libvirt为使用qemu而不是kvm。)####

  

# Compute agent (this single node is both controller and compute).
yum install -y openstack-nova-compute
# BUG FIX: the crudini install and its first invocation were fused onto one
# line in the original; they are two separate commands.
yum install -y crudini
crudini --set /etc/nova/nova.conf vnc server_listen '0.0.0.0'
crudini --set /etc/nova/nova.conf vnc novncproxy_base_url http://vip:6080/vnc_auto.html
# Running inside a VM: libvirt must use qemu, not kvm (per the official guide).
crudini --set /etc/nova/nova.conf libvirt virt_type qemu
# BUG FIX: this option appeared as a bare "key = value" line, which is not a
# shell command. It belongs in nova.conf [scheduler]: auto-register new
# compute hosts every 300 seconds.
crudini --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
# Start the libvirt TCP socket first ...
systemctl restart libvirtd-tcp.socket
# ... then enable and start libvirt and the compute agent.
systemctl enable libvirtd openstack-nova-compute
systemctl restart libvirtd openstack-nova-compute
openstack compute service list --service nova-compute

# Register the new compute host in the cell database.
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

验证:
# Verification: services up, catalog populated, images listable, upgrade check.
openstack compute service list
openstack catalog list
openstack image list
nova-status upgrade check

11. neutron配置

# Create the neutron database and grant the neutron DB user access.
mysql -uroot -pAdmin123! -e'create database if not exists neutron;
grant all privileges on neutron.* to neutron@localhost identified by "neutron";
grant all privileges on neutron.* to neutron@"%" identified by "neutron";
flush privileges;'
# Keystone identity for the networking service (interactive password prompt).
openstack user create --domain default --password-prompt neutron

openstack role add --project service  --user neutron admin
openstack service create --name neutron --description "OpenStack NetWorking" network
# Networking API endpoints on port 9696.
openstack endpoint create --region RegionOne network public http://vip:9696
openstack endpoint create --region RegionOne network internal http://vip:9696
openstack endpoint create --region RegionOne network admin http://vip:9696
yum install -y openstack-neutron openstack-neutron-ml2   openstack-neutron-linuxbridge ebtables ipset iproute

# --- /etc/neutron/neutron.conf ---
crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router
crudini --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:openstack@vip
crudini --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
# Notify nova of port status/data changes so instances see network events.
crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
crudini --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
crudini --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:neutron@vip/neutron
crudini --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://vip:5000
# Lower-cased "vip" (was "VIP") for consistency with the /etc/hosts entry.
crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://vip:5000
crudini --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers 192.168.116.85:11211
crudini --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
crudini --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
crudini --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
crudini --set /etc/neutron/neutron.conf keystone_authtoken project_name service
crudini --set /etc/neutron/neutron.conf keystone_authtoken username neutron
crudini --set /etc/neutron/neutron.conf keystone_authtoken password admin
crudini --set /etc/neutron/neutron.conf nova auth_url http://vip:5000
crudini --set /etc/neutron/neutron.conf nova auth_type password
crudini --set /etc/neutron/neutron.conf nova project_domain_name default
crudini --set /etc/neutron/neutron.conf nova user_domain_name default
crudini --set /etc/neutron/neutron.conf nova region_name RegionOne
crudini --set /etc/neutron/neutron.conf nova project_name service
crudini --set /etc/neutron/neutron.conf nova username nova
crudini --set /etc/neutron/neutron.conf nova password admin
# BUG FIX: this command and the first ml2_conf.ini edit were fused onto one line.
crudini --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp

# --- /etc/neutron/plugins/ml2/ml2_conf.ini ---
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
# BUG FIX: enable_ipset and the linuxbridge mapping were fused onto one line.
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true

# --- /etc/neutron/plugins/ml2/linuxbridge_agent.ini ---
# The provider (flat) network rides on physical NIC ens192.
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 192.168.116.85
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
crudini --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

# --- L3 / DHCP / metadata agents ---
crudini --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge

crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
# BUG FIX: enable_isolated_metadata and nova_metadata_host were fused onto one line.
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
crudini --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host 192.168.116.85
# BUG FIX: the shared-secret setting and the first nova.conf edit were fused.
crudini --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET

# --- tell nova how to reach neutron (/etc/nova/nova.conf [neutron]) ---
crudini --set /etc/nova/nova.conf neutron auth_url http://vip:5000
crudini --set /etc/nova/nova.conf neutron auth_type password
crudini --set /etc/nova/nova.conf neutron project_domain_name default
crudini --set /etc/nova/nova.conf neutron user_domain_name default
crudini --set /etc/nova/nova.conf neutron region_name RegionOne
crudini --set /etc/nova/nova.conf neutron project_name service
crudini --set /etc/nova/nova.conf neutron username neutron
crudini --set /etc/nova/nova.conf neutron password admin
crudini --set /etc/nova/nova.conf neutron service_metadata_proxy true
crudini --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
# Plugin symlink expected by the neutron-server startup scripts.
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# Populate the neutron database.
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api
systemctl enable neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
systemctl start neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
systemctl enable neutron-l3-agent
systemctl start neutron-l3-agent

12. cinder配置

# Create the cinder database and grant the cinder DB user access.
mysql -uroot -pAdmin123! -e'create database if not exists cinder;
grant all privileges on cinder.* to cinder@localhost identified by "cinder";
grant all privileges on cinder.* to cinder@"%" identified by "cinder";
flush privileges;'
# Keystone identity for the block-storage service (interactive password prompt).
openstack user create --domain default --password-prompt cinder

openstack role add --project service --user cinder admin
# Block storage exposes both the v2 and v3 APIs.
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
# BUG FIX: the v3 service creation and the first endpoint command were fused
# onto one line in the original.
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://vip:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://vip:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://vip:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://vip:8776/v3/%\(project_id\)s
yum install -y openstack-cinder

# /etc/cinder/cinder.conf: messaging, auth strategy, local IP, database.
crudini --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:openstack@vip
crudini --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
crudini --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.116.85
crudini --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:cinder@vip/cinder
crudini --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
crudini --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://vip:5000
crudini --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://vip:5000
crudini --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers 192.168.116.85:11211
crudini --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
crudini --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
crudini --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
crudini --set /etc/cinder/cinder.conf keystone_authtoken project_name service
crudini --set /etc/cinder/cinder.conf keystone_authtoken username cinder
crudini --set /etc/cinder/cinder.conf keystone_authtoken password admin
# Populate the cinder schema.
su -s /bin/sh -c "cinder-manage db sync" cinder
# BUG FIX: the nova.conf edit and the nova-api restart were fused onto one line.
crudini --set /etc/nova/nova.conf cinder os_region_name RegionOne
systemctl restart openstack-nova-api
systemctl enable openstack-cinder-api openstack-cinder-scheduler
systemctl start openstack-cinder-api openstack-cinder-scheduler

存储

# --- LVM backend (requires a spare disk /dev/sdb) ---
#yum install -y lvm2 device-mapper-persistent-data

#systemctl enable lvm2-lvmetad
#systemctl start lvm2-lvmetad
# BUG FIX: in the original, the following commands were fused onto the
# commented "#systemctl start lvm2-lvmetad" line and therefore never ran;
# they are restored as separate, active commands below.
vgcreate cinder-volumes /dev/sdb
# Only let LVM scan /dev/sdb; reject every other device.
sed -i '/sysfs_scan =/i \\tfilter = [ "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf
crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
crudini --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://vip:9292
crudini --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
crudini --set /etc/cinder/cinder.conf lvm volume_group cinder-volumes
crudini --set /etc/cinder/cinder.conf lvm target_protocol iscsi
crudini --set /etc/cinder/cinder.conf lvm target_helper lioadm
systemctl enable openstack-cinder-volume target
systemctl start openstack-cinder-volume target
# NOTE(review): the block below repeats the [lvm] settings with a different
# volume_group (vg_volume01 instead of cinder-volumes); crudini --set simply
# overwrites the earlier values, so only one variant should be kept — confirm
# which volume group actually exists on this host.
crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
crudini --set /etc/cinder/cinder.conf lvm target_helper lioadm
crudini --set /etc/cinder/cinder.conf lvm target_protocol iscsi
crudini --set /etc/cinder/cinder.conf lvm target_ip_address 192.168.116.85
crudini --set /etc/cinder/cinder.conf lvm volume_group vg_volume01
crudini --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
# BUG FIX: quoted so the shell does not expand $state_path (cinder itself
# substitutes state_path when reading the option).
crudini --set /etc/cinder/cinder.conf lvm volumes_dir '$state_path/volumes'
# BUG FIX: restart and the (redundant) package install were fused onto one line.
systemctl restart openstack-cinder-volume
yum install -y openstack-cinder

# --- NFS backup driver ---
# BUG FIX: three commands were fused onto one line in the original.
yum install -y nfs-utils
sed -i '/^#Domain =/a Domain = srv.world' /etc/idmapd.conf
crudini --set /etc/cinder/cinder.conf DEFAULT backup_driver cinder.backup.drivers.nfs.NFSBackupDriver
crudini --set /etc/cinder/cinder.conf DEFAULT backup_mount_point_base /var/lib/cinder/backup_nfs
crudini --set /etc/cinder/cinder.conf DEFAULT backup_share nfs.srv.world:/var/lib/cinder-backup
systemctl enable openstack-cinder-backup
systemctl start openstack-cinder-backup
chown -R cinder. /var/lib/cinder/backup_nfs
# Back up volume disk_nfs, then restore it from the backup.
openstack volume backup create --name bk-disk_nfs-01 --incremental --force disk_nfs
openstack volume backup restore bk-disk_nfs-01 disk_nfs

openstack单机部署 未完成的更多相关文章

  1. 在虚拟机单机部署OpenStack Grizzly

    安装过程 安装Ubuntu 我手头有的是Ubuntu Server 12.04 64位版,就直接用了,默认安装即可,配置的时候很简单,如下 内存:1G 硬盘:20G 处理器:2 网络:NAT 装好以后 ...

  2. openstack_swift源代码分析——Swift单机部署

    本文对在单机部署swift 当中每个细节做具体的介绍,并对配置做对应的解释 PC物理机    Ubuntu-12.04-desktop-64位 Swift 版本号:1.13.1 Swift-clien ...

  3. 深入理解Openstack自动化部署

    前言 说实话,看到自己在博客园的排名感到惭愧,因为自己最近两年没有持续地在博客园上写技术博客了,有人私下问我是不是荒废了?翻翻15年和16年的博客,真的是少的可怜.一方面的确由于岗位的变化,导致了工作 ...

  4. Hadoop系列之(一):Hadoop单机部署

    1. Hadoop介绍 Hadoop是一个能够对海量数据进行分布式处理的系统架构. Hadoop框架的核心是:HDFS和MapReduce. HDFS分布式文件系统为海量的数据提供了存储, MapRe ...

  5. OpenStack Havana 部署在Ubuntu 12.04 Server 【OVS+GRE】(三)——计算节点的安装

    序:OpenStack Havana 部署在Ubuntu 12.04 Server [OVS+GRE] 计算节点: 1.准备结点 安装好ubuntu 12.04 Server 64bits后,进入ro ...

  6. OpenStack Havana 部署在Ubuntu 12.04 Server 【OVS+GRE】(二)——网络节点的安装

    序:OpenStack Havana 部署在Ubuntu 12.04 Server [OVS+GRE] 网络节点: 1.安装前更新系统 安装好ubuntu 12.04 Server 64bits后,进 ...

  7. OpenStack Havana 部署在Ubuntu 12.04 Server 【OVS+GRE】(一)——控制节点的安装

      序:OpenStack Havana 部署在Ubuntu 12.04 Server [OVS+GRE] 控制节点: 1.准备Ubuntu 安装好Ubuntu12.04 server 64bits后 ...

  8. OpenStack Havana 部署在Ubuntu 12.04 Server 【OVS+GRE】——序

    OpenStack Havana 部署在Ubuntu 12.04 Server [OVS+GRE](一)——控制节点的安装 OpenStack Havana 部署在Ubuntu 12.04 Serve ...

  9. OpenStack安装部署管理中常见问题解决方法

    一.网络问题-network 更多网络原理机制可以参考<OpenStack云平台的网络模式及其工作机制>. 1.1.控制节点与网络控制器区别 OpenStack平台中有两种类型的物理节点, ...

  10. Ecstore安装篇-2.单机部署【linux】

    单机部署实施-linux 单机部署实施-linux author :James,jimingsong@vip.qq.com since :2015-03-02 系统环境需求 软件来源 底层依赖 1. ...

随机推荐

  1. KingbaseES V8R6 账号异常登录锁定案例

    数据库版本: test=> select version(); version --------------------------------------------------------- ...

  2. MySQL建表语句生成Golang代码

    1. 背景 对于后台开发新的需求时,一般会先进行各种表的设计,写各个表的建表语句 然后根据建立的表,写对应的model代码.基础的增删改查代码(基础的增删改查服务可以划入DAO(Data Access ...

  3. DFS文件夹无法访问

    最近DFS的文件服务器出现了部分文件和文件夹无法访问的情况.客户端直接访问DFS成员的共享文件夹时有是会出现Element not found的错误.有时打开文件的时候会出现文件不存在,或者你没有权限 ...

  4. flutter系列之:flutter中常用的Stack layout详解

    [toc] 简介 对于现代APP的应用来说,为了更加美观,通常会需要用到不同图像的堆叠效果,比如在一个APP用户背景头像上面添加一个按钮,表示可以修改用户信息等. 要实现这样的效果,我们需要在一个Im ...

  5. 题解 P2471 【[SCOI2007]降雨量】

    原题传送门 前置芝士 离散化 ST表和RMQ问题 二分 正文 首先我们来分析一下题意. 题目会给出两个大小为 \(n\) 的数组,\(y\) 和 \(r\) ,其中 \(y_i\) 表示第 \(i\) ...

  6. Kubernetes DevOps: Harbor

    Harbor 是一个 CNCF 基金会托管的开源的可信的云原生 docker registry 项目,可以用于存储.签名.扫描镜像内容,Harbor 通过添加一些常用的功能如安全性.身份权限管理等来扩 ...

  7. 使用docker-compose部署WordPress项目

    创建空文件夹 假设新建一个名为 wordpress 的文件夹,然后进入这个文件夹. 创建 docker-compose.yml 文件 docker-compose.yml 文件将开启一个 wordpr ...

  8. Python实现给图片加水印功能

    前言 最近忙得连轴转,很久没更新博客了,代码倒是没啥写,积累了好些东西,接下来一有时间就来更新吧~ 本文记录使用Python实现给图片添加水印的功能实现过程 先看效果 把公众号的封面作为素材 原图是这 ...

  9. InetAddress.getLocalHost() 执行很慢?

    背景介绍 某次在 SpringBoot 2.2.0 项目的一个配置类中引入了这么一行代码: InetAddress.getLocalHost().getHostAddress() 导致项目启动明显变慢 ...

  10. 追求性能极致:Redis6.0的多线程模型

    Redis系列1:深刻理解高性能Redis的本质 Redis系列2:数据持久化提高可用性 Redis系列3:高可用之主从架构 Redis系列4:高可用之Sentinel(哨兵模式) Redis系列5: ...