Note: 1. volume_driver = cinder.volume.drivers.rbd.RBDDriver corresponds to /usr/lib/python2.7/site-packages/cinder/volume/drivers/rbd.py
[root@node1 ~]# systemctl restart openstack-cinder-scheduler openstack-cinder-api
3. Nova configuration
Configure ceph.conf
# To boot VMs from ceph rbd, ceph must be configured as nova's ephemeral backend;
# Enabling the rbd cache feature in the compute-node configuration is recommended;
# To ease troubleshooting, configure the admin socket parameter so that every VM using ceph rbd gets its own socket, which helps with performance analysis and fault diagnosis;
# These changes only touch the [client] and [client.cinder] sections of ceph.conf on all compute nodes; node3 is used as the example
[root@node3 ~]# vim /etc/ceph/ceph.conf
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
# Create the socket and log directories specified in ceph.conf, and change their ownership
[root@node3 ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@node3 ~]# chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/
Note: in production the /var/run/ceph/guests directory kept disappearing after a server reboot, which made the compute node unusable (instances could neither be created nor deleted), so I wrote the cron job below to periodically check for and recreate /var/run/ceph/guests/
echo '*/3 * * * * root if [ ! -d /var/run/ceph/guests/ ] ;then mkdir -pv /var/run/ceph/guests/ /var/log/qemu/ && chown qemu:libvirt /var/run/ceph/guests/ /var/log/qemu/ && systemctl restart libvirtd.service openstack-nova-compute.service ;fi' >>/etc/crontab
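Since /var/run is a tmpfs rebuilt at every boot, a cleaner alternative to the cron job is a systemd-tmpfiles rule; the sketch below is an assumption (the file name ceph-guests.conf is hypothetical), not part of the original setup
[root@node3 ~]# cat > /etc/tmpfiles.d/ceph-guests.conf <<'EOF'
# recreate the socket and log directories at boot, same ownership as the chown above
d /var/run/ceph/guests 0755 qemu libvirt -
d /var/log/qemu 0755 qemu libvirt -
EOF
[root@node3 ~]# systemd-tmpfiles --create /etc/tmpfiles.d/ceph-guests.conf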
# On all compute nodes, configure nova to use the ceph vms pool as its backend; node3 as the example
[root@node3 ~]# vim /etc/nova/nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
# this uuid must match the libvirt secret uuid used earlier
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
# disable file injection
inject_password = false
inject_key = false
inject_partition = -2
# discard support for the VM's ephemeral root disk; with "unmap", space is freed immediately once a scsi-type disk releases it
hw_disk_discard = unmap
# pre-existing setting
virt_type=kvm
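To double-check that rbd_secret_uuid matches the libvirt secret created earlier, you can list the secrets on the compute node (a quick sanity check; the secret itself was defined in a previous section):
[root@node3 ~]# virsh secret-list
[root@node3 ~]# virsh secret-get-value 29355b97-1fd8-4135-a26e-d7efeaa27b0a
# the listed uuid must equal rbd_secret_uuid, and the value must be the client.cinder key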
[root@node3 ~]# cat /etc/nova/nova.conf
[DEFAULT]
cpu_allocation_ratio=8
ram_allocation_ratio=2
disk_allocation_ratio=2
resume_guests_state_on_host_boot=true
reserved_host_disk_mb=20480
baremetal_enabled_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@10.30.1.208
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.30.1.208:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.30.1.208:5000
auth_url = http://10.30.1.208:35357
memcached_servers = 10.30.1.208:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 29355b97-1fd8-4135-a26e-d7efeaa27b0a
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
inject_password = false
inject_key = false
inject_partition = -2
hw_disk_discard = unmap
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://10.30.1.208:9696
auth_url = http://10.30.1.208:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.30.1.208:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 10.30.1.203
novncproxy_base_url = http://10.30.1.208:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
Restart the nova services
Compute node
[root@node3 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
Control node
[root@node1 ~]# systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service
Note: after restarting the nova services, verify that they came up properly; if openstack-nova-compute fails to start, check /var/log/nova/nova-compute.log to troubleshoot
systemctl status libvirtd.service openstack-nova-compute.service
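It is also worth confirming from the control node that every nova-compute reports as up after the restart, for example:
[root@node1 ~]# openstack compute service list --service nova-compute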
Configure live migration
Modify /etc/libvirt/libvirtd.conf
# Perform on all compute nodes; node3 as the example;
# the line numbers (num) of the changes in libvirtd.conf are given below
[root@node3 ~]# egrep -vn "^$|^#" /etc/libvirt/libvirtd.conf
# uncomment the following three lines
22:listen_tls = 0
33:listen_tcp = 1
45:tcp_port = "16509"
# uncomment, and set the listen address
55:listen_addr = "172.30.200.41"
# uncomment, and disable authentication
158:auth_tcp = "none"
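If you prefer to script these edits on every compute node, a sed sketch like the one below applies the same changes (listen_addr is per-node, so it is set separately; verify the file before restarting libvirtd):
[root@node3 ~]# sed -ri -e 's/^#?listen_tls *=.*/listen_tls = 0/' \
  -e 's/^#?listen_tcp *=.*/listen_tcp = 1/' \
  -e 's/^#?tcp_port *=.*/tcp_port = "16509"/' \
  -e 's/^#?auth_tcp *=.*/auth_tcp = "none"/' /etc/libvirt/libvirtd.conf
# use this node's own IP for the listen address
[root@node3 ~]# sed -ri 's/^#?listen_addr *=.*/listen_addr = "10.30.1.203"/' /etc/libvirt/libvirtd.conf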
Modify /etc/sysconfig/libvirtd
# Perform on all compute nodes; node3 as the example;
# the line number (num) of the change in the libvirtd file is given below
[root@node3 ~]# egrep -vn "^$|^#" /etc/sysconfig/libvirtd
# uncomment
9:LIBVIRTD_ARGS="--listen"
Set up iptables
# During live migration, the source compute node actively connects to tcp port 16509 on the destination node; you can test this with "virsh -c qemu+tcp://{node_ip or node_name}/system";
# Before and after the migration, the instance being migrated uses tcp ports 49152~49161 on the source and destination nodes for temporary communication;
# Since the VMs already depend on iptables rules, never casually restart the iptables service here; add rules by insertion instead;
# Likewise, persist the rules by editing the config file; never use the "iptables save" command;
# Perform on all compute nodes; node3 as the example
[root@node3 ~]# iptables -I INPUT -p tcp -m state --state NEW -m tcp --dport 16509 -j ACCEPT
[root@node3 ~]# iptables -I INPUT -p tcp -m state --state NEW -m tcp --dport 49152:49161 -j ACCEPT
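To persist the rules by editing the config file, as the note above requires, the same two lines can be added to /etc/sysconfig/iptables (a sketch; place them in the INPUT chain before any final REJECT rule):
-A INPUT -p tcp -m state --state NEW -m tcp --dport 16509 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 49152:49161 -j ACCEPT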
Restart services
# both libvirtd and nova-compute need to be restarted
[root@node3 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
# check the service
[root@node3 ~]# netstat -tunlp | grep 16509
tcp 0 0 10.30.1.203:16509 0.0.0.0:* LISTEN 9229/libvirtd
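You can also confirm that a peer compute node reaches the listener over tcp, as mentioned above (node4 as the hypothetical peer):
[root@node3 ~]# virsh -c qemu+tcp://node4/system list --all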
Verification
If a ceph volume is used as the boot disk, i.e. the VM's runtime image lives on shared storage, live migration is easy to perform.
1) Create a ceph-backed bootable volume
# When nova boots an instance from rbd, the image must be in raw format, otherwise both glance-api and cinder report errors at boot time;
# First convert the format, turning the *.img file into a *.raw file
[root@node1 ~]# qemu-img convert -f qcow2 cirros-0.3.5-x86_64-disk.img -O raw cirros-0.3.5-x86_64-disk.raw
Note: if the source image is large, it is best to compress it during conversion, for example:
virt-sparsify -x centos7.5.qcow2 --convert raw centos7.5.raw
# create the raw-format image
[root@node1 ~]# openstack image create "cirros3.5" --file cirros-0.3.5-x86_64-disk.raw --disk-format raw --container-format bare --public
+------------------+----------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+----------------------------------------------------------------------------------------------------------+
| checksum | 4bda4108d1a74dd73a6ae6d0ba369916 |
| container_format | bare |
| created_at | 2020-02-05T07:09:47Z |
| disk_format | raw |
| file | /v2/images/1333fc90-d9a6-4df8-9a8b-391b035770c0/file |
| id | 1333fc90-d9a6-4df8-9a8b-391b035770c0 |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros3.5 |
| owner | 75aed7016c86445198356e78dddde4ba |
| properties | direct_url='rbd://272905d2-fd66-4ef6-a772-9cd73a274683/images/1333fc90-d9a6-4df8-9a8b-391b035770c0/snap' |
| protected | False |
| schema | /v2/schemas/image |
| size | 41126400 |
| status | active |
| tags | |
| updated_at | 2020-02-05T07:10:09Z |
| virtual_size | None |
| visibility | public |
+------------------+----------------------------------------------------------------------------------------------------------+
[root@node5 ~]# rbd ls images
1333fc90-d9a6-4df8-9a8b-391b035770c0
61fcdc23-5e82-4fac-9816-e7b93781188d
# create a bootable volume from the new image
[root@node1 ~]# cinder create --image-id 1333fc90-d9a6-4df8-9a8b-391b035770c0 --volume-type ceph --name ceph-bootable1 2
+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2020-02-05T09:44:50.000000 |
| description | None |
| encrypted | False |
| id | d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | ceph-bootable1 |
| os-vol-host-attr:host | None |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | 75aed7016c86445198356e78dddde4ba |
| replication_status | None |
| size | 2 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| updated_at | None |
| user_id | 51ffe09d0ed342f4bf4e443e454055cc |
| volume_type | ceph |
+--------------------------------+--------------------------------------+
# check the newly created bootable volume
[root@node1 ~]# cinder list
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
| d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 | available | ceph-bootable1 | 2 | ceph | true | |
| f36bba2d-8a8b-46b9-ab0c-8e0d0d62ed04 | available | ceph-v1 | 20 | ceph | false | |
+--------------------------------------+-----------+----------------+------+-------------+----------+-------------+
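The new volume should also be visible in the ceph volumes pool; assuming the rbd driver's standard volume-<id> naming, the expected entry is:
[root@node1 ~]# rbd ls volumes | grep d5e8af24
volume-d5e8af24-7a9e-4509-82b8-6a2999aeeeb2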
# Create an instance from a ceph-backed volume;
# "--boot-volume" takes a volume with the "bootable" attribute; once started, the VM runs on that volume
[root@node1 ~]# openstack flavor list
+--------------------------------------+------+------+------+-----------+-------+-----------+
| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |
+--------------------------------------+------+------+------+-----------+-------+-----------+
| bc3229e5-4614-48c3-943f-6a7783bcbbfc | 1c1g | 1024 | 20 | 0 | 1 | True |
| f4063a6c-b185-4bad-92d7-938ddb209553 | 1c2g | 2024 | 40 | 0 | 1 | False |
+--------------------------------------+------+------+------+-----------+-------+-----------+
[root@node1 ~]# openstack network list
+--------------------------------------+---------+--------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+---------+--------------------------------------+
| 5ac5c948-909f-47ff-beba-a2ffaf917c5f | vlan99 | bbd536c6-a975-4841-8082-35b28de16ef0 |
| 98f5d807-80e0-48a3-9f40-97eb6ed15f33 | vlan809 | ffc3c430-e551-4c78-be5e-52e6aaf1484d |
+--------------------------------------+---------+--------------------------------------+
[root@node1 ~]# openstack security group list
+--------------------------------------+-----------+------------------------+----------------------------------+
| ID | Name | Description | Project |
+--------------------------------------+-----------+------------------------+----------------------------------+
| 5bb5f2b1-9210-470f-a4a7-2715220b2920 | allow all | | 75aed7016c86445198356e78dddde4ba |
| dc241f97-0099-448f-8be4-8a41f1a6a806 | default | Default security group | 15897818eb0a42a382b75bbeefb14983 |
+--------------------------------------+-----------+------------------------+----------------------------------+
[root@node1 ~]# nova boot --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --boot-volume d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephvolumes-instance1
Or:
[root@node1 ~]# openstack server create --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --volume d5e8af24-7a9e-4509-82b8-6a2999aeeeb2 --network 5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephvolumes-instance1
+--------------------------------------+-------------------------------------------------+
| Property | Value |
+--------------------------------------+-------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hostname | cirros-cephvolumes-instance1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | 0 |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-3n00xb60 |
| OS-EXT-SRV-ATTR:root_device_name | - |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | GdtbyR3G3k62 |
| config_drive | |
| created | 2020-02-05T13:59:38Z |
| description | - |
| flavor:disk | 20 |
| flavor:ephemeral | 0 |
| flavor:extra_specs | {} |
| flavor:original_name | 1c1g |
| flavor:ram | 1024 |
| flavor:swap | 0 |
| flavor:vcpus | 1 |
| hostId | |
| host_status | |
| id | 8611811b-b4e9-4fdc-88b9-1d63427a664d |
| image | Attempt to boot from volume - no image supplied |
| key_name | - |
| locked | False |
| metadata | {} |
| name | cirros-cephvolumes-instance1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | 5bb5f2b1-9210-470f-a4a7-2715220b2920 |
| status | BUILD |
| tags | [] |
| tenant_id | 75aed7016c86445198356e78dddde4ba |
| updated | 2020-02-05T13:59:38Z |
| user_id | 51ffe09d0ed342f4bf4e443e454055cc |
+--------------------------------------+-------------------------------------------------+
2) Boot a VM from ceph rbd
# --nic: net-id is the network id, not the subnet-id;
# the final argument, "cirros-cephrbd-instance1", is the instance name
nova boot --flavor bc3229e5-4614-48c3-943f-6a7783bcbbfc --image 1333fc90-d9a6-4df8-9a8b-391b035770c0 --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 cirros-cephrbd-instance1
+--------------------------------------+--------------------------------------------------+
| Property | Value |
+--------------------------------------+--------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hostname | cirros-cephrbd-instance1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | 0 |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-d1kr04ao |
| OS-EXT-SRV-ATTR:root_device_name | - |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | SsERBjVHB849 |
| config_drive | |
| created | 2020-02-05T14:02:52Z |
| description | - |
| flavor:disk | 20 |
| flavor:ephemeral | 0 |
| flavor:extra_specs | {} |
| flavor:original_name | 1c1g |
| flavor:ram | 1024 |
| flavor:swap | 0 |
| flavor:vcpus | 1 |
| hostId | |
| host_status | |
| id | 1bbc59bf-6827-439e-86d3-21eda28d8b43 |
| image | cirros3.5 (1333fc90-d9a6-4df8-9a8b-391b035770c0) |
| key_name | - |
| locked | False |
| metadata | {} |
| name | cirros-cephrbd-instance1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | 5bb5f2b1-9210-470f-a4a7-2715220b2920 |
| status | BUILD |
| tags | [] |
| tenant_id | 75aed7016c86445198356e78dddde4ba |
| updated | 2020-02-05T14:02:52Z |
| user_id | 51ffe09d0ed342f4bf4e443e454055cc |
+--------------------------------------+--------------------------------------------------+
# list the created instances
[root@node1 ~]# openstack server list
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
| 1bbc59bf-6827-439e-86d3-21eda28d8b43 | cirros-cephrbd-instance1 | ACTIVE | vlan99=172.16.99.102 | cirros3.5 | 1c1g |
| 8611811b-b4e9-4fdc-88b9-1d63427a664d | cirros-cephvolumes-instance1 | ACTIVE | vlan99=172.16.99.101 | | 1c1g |
+--------------------------------------+------------------------------+--------+----------------------+-----------+--------+
# show the details of a created instance
[root@node1 ~]# nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43
+--------------------------------------+----------------------------------------------------------+
| Property | Value |
+--------------------------------------+----------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | node4 |
| OS-EXT-SRV-ATTR:hostname | cirros-cephrbd-instance1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | node4 |
| OS-EXT-SRV-ATTR:instance_name | instance-0000010a |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | 0 |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-d1kr04ao |
| OS-EXT-SRV-ATTR:root_device_name | /dev/vda |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | 1 |
| OS-EXT-STS:task_state | - |
| OS-EXT-STS:vm_state | active |
| OS-SRV-USG:launched_at | 2020-02-05T14:03:15.000000 |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| config_drive | |
| created | 2020-02-05T14:02:52Z |
| description | - |
| flavor:disk | 20 |
| flavor:ephemeral | 0 |
| flavor:extra_specs | {} |
| flavor:original_name | 1c1g |
| flavor:ram | 1024 |
| flavor:swap | 0 |
| flavor:vcpus | 1 |
| hostId | 96c74a94ec1b18dbfd8a3bcda847feeb82a58271b0945688129cde93 |
| host_status | UP |
| id | 1bbc59bf-6827-439e-86d3-21eda28d8b43 |
| image | cirros3.5 (1333fc90-d9a6-4df8-9a8b-391b035770c0) |
| key_name | - |
| locked | False |
| metadata | {} |
| name | cirros-cephrbd-instance1 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | allow all |
| status | ACTIVE |
| tags | [] |
| tenant_id | 75aed7016c86445198356e78dddde4ba |
| updated | 2020-02-05T14:03:15Z |
| user_id | 51ffe09d0ed342f4bf4e443e454055cc |
| vlan99 network | 172.16.99.102 |
+--------------------------------------+----------------------------------------------------------+
# inspect the xml of a VM backed by ceph
[root@node4 ~]# virsh dumpxml 1bbc59bf-6827-439e-86d3-21eda28d8b43
<domain type='kvm' id='3'>
<name>instance-0000010a</name>
<uuid>1bbc59bf-6827-439e-86d3-21eda28d8b43</uuid>
<metadata>
<nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
<nova:package version="17.0.13-1.el7"/>
<nova:name>cirros-cephrbd-instance1</nova:name>
<nova:creationTime>2020-02-05 14:03:11</nova:creationTime>
<nova:flavor name="1c1g">
<nova:memory>1024</nova:memory>
<nova:disk>20</nova:disk>
<nova:swap>0</nova:swap>
<nova:ephemeral>0</nova:ephemeral>
<nova:vcpus>1</nova:vcpus>
</nova:flavor>
<nova:owner>
<nova:user uuid="51ffe09d0ed342f4bf4e443e454055cc">admin</nova:user>
<nova:project uuid="75aed7016c86445198356e78dddde4ba">admin</nova:project>
</nova:owner>
<nova:root type="image" uuid="1333fc90-d9a6-4df8-9a8b-391b035770c0"/>
</nova:instance>
</metadata>
<memory unit='KiB'>1048576</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<vcpu placement='static'>1</vcpu>
<cputune>
<shares>1024</shares>
</cputune>
<resource>
<partition>/machine</partition>
</resource>
<sysinfo type='smbios'>
<system>
<entry name='manufacturer'>RDO</entry>
<entry name='product'>OpenStack Compute</entry>
<entry name='version'>17.0.13-1.el7</entry>
<entry name='serial'>4a7258d2-f86c-af77-106d-598ffd558b8e</entry>
<entry name='uuid'>1bbc59bf-6827-439e-86d3-21eda28d8b43</entry>
<entry name='family'>Virtual Machine</entry>
</system>
</sysinfo>
<os>
<type arch='x86_64' machine='pc-i440fx-rhel7.6.0'>hvm</type>
<boot dev='hd'/>
<smbios mode='sysinfo'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode='custom' match='exact' check='full'>
<model fallback='forbid'>IvyBridge-IBRS</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='1' threads='1'/>
<feature policy='require' name='ss'/>
<feature policy='require' name='vmx'/>
<feature policy='require' name='pcid'/>
<feature policy='require' name='hypervisor'/>
<feature policy='require' name='arat'/>
<feature policy='require' name='tsc_adjust'/>
<feature policy='require' name='umip'/>
<feature policy='require' name='stibp'/>
<feature policy='require' name='ssbd'/>
<feature policy='require' name='xsaveopt'/>
<feature policy='require' name='pdpe1gb'/>
</cpu>
<clock offset='utc'>
<timer name='pit' tickpolicy='delay'/>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='hpet' present='no'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
<disk type='network' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<auth username='cinder'>
<secret type='ceph' uuid='29355b97-1fd8-4135-a26e-d7efeaa27b0a'/>
</auth>
<source protocol='rbd' name='vms/1bbc59bf-6827-439e-86d3-21eda28d8b43_disk'>
<host name='10.30.1.221' port='6789'/>
<host name='10.30.1.222' port='6789'/>
<host name='10.30.1.223' port='6789'/>
</source>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</disk>
<controller type='usb' index='0' model='piix3-uhci'>
<alias name='usb'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pci-root'>
<alias name='pci.0'/>
</controller>
<interface type='bridge'>
<mac address='fa:16:3e:03:7d:e3'/>
<source bridge='brq5ac5c948-90'/>
<target dev='tapa2466968-b7'/>
<model type='virtio'/>
<mtu size='1500'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<source path='/dev/pts/3'/>
<log file='/var/lib/nova/instances/1bbc59bf-6827-439e-86d3-21eda28d8b43/console.log' append='off'/>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
<alias name='serial0'/>
</serial>
<console type='pty' tty='/dev/pts/3'>
<source path='/dev/pts/3'/>
<log file='/var/lib/nova/instances/1bbc59bf-6827-439e-86d3-21eda28d8b43/console.log' append='off'/>
<target type='serial' port='0'/>
<alias name='serial0'/>
</console>
<input type='tablet' bus='usb'>
<alias name='input0'/>
<address type='usb' bus='0' port='1'/>
</input>
<input type='mouse' bus='ps2'>
<alias name='input1'/>
</input>
<input type='keyboard' bus='ps2'>
<alias name='input2'/>
</input>
<graphics type='vnc' port='5901' autoport='yes' listen='0.0.0.0' keymap='en-us'>
<listen type='address' address='0.0.0.0'/>
</graphics>
<video>
<model type='cirrus' vram='16384' heads='1' primary='yes'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<memballoon model='virtio'>
<stats period='10'/>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</memballoon>
</devices>
<seclabel type='dynamic' model='dac' relabel='yes'>
<label>+107:+107</label>
<imagelabel>+107:+107</imagelabel>
</seclabel>
</domain>
# verify the instance boots from ceph rbd
[root@node1 ~]# rbd ls vms
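# per the disk <source> in the xml above, the listing should include 1bbc59bf-6827-439e-86d3-21eda28d8b43_disk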
3) Live-migrate the rbd-booted VM
# "nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43" shows that the rbd-booted instance sits on node4 before migration;
# or verify with "nova hypervisor-servers node4";
[root@node1 ~]# nova live-migration cirros-cephrbd-instance1 node3
Note: before migrating, make sure node3 and node4 can ssh to each other without a password (passwordless access between compute nodes is the key to a successful live migration); an example follows
Using ceph-host-04 and ceph-host-02 as examples: generate a key pair on one host (ceph-host-04) with ssh-keygen, then copy /root/.ssh/id_rsa and /root/.ssh/id_rsa.pub to the other hosts (including ceph-host-04 and ceph-host-02), so that all N hosts can reach one another without passwords
[root@ceph-host-04 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:MGkIRd0B3Juv6+7OlNOknVGWGKXOulP4b/ddw+e+RDg root@ceph-host-04
The key's randomart image is:
+---[RSA 2048]----+
| .ooo.+..... |
| . .o.o + . |
| . = oo + |
| . ooo o . |
| So= E . |
| .Boo + |
| *++ +o|
| ooo. . o.=|
| =Oo o.. +*|
+----[SHA256]-----+
[root@ceph-host-04 ~]# ssh-copy-id ceph-host-04
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'ceph-host-04 (10.30.1.224)' can't be established.
ECDSA key fingerprint is SHA256:qjCvy9Q/qRV2HIT0bt6ev//3rOGVntxAPQRDZ4aXfEE.
ECDSA key fingerprint is MD5:99:db:b6:3d:83:0e:c2:56:25:47:f6:1b:d7:bd:f0:ce.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@ceph-host-04's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'ceph-host-04'"
and check to make sure that only the key(s) you wanted were added.
[root@ceph-host-04 ~]# ssh-copy-id ceph-host-02
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@ceph-host-02's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'ceph-host-02'"
and check to make sure that only the key(s) you wanted were added.
[root@ceph-host-04 ~]# scp .ssh/id_rsa root@ceph-host-02:/root/.ssh/
id_rsa
[root@ceph-host-04 ~]# ssh ceph-host-02 w
01:23:10 up 5:20, 1 user, load average: 0.12, 0.18, 0.36
USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT
root pts/0 desktop-l37krfr. 23:27 1:58 0.14s 0.14s -bash
[root@ceph-host-02 ~]# ssh ceph-host-04 w
01:25:01 up 5:22, 1 user, load average: 0.00, 0.01, 0.05
USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT
root pts/0 desktop-l37krfr. 22:04 5.00s 0.26s 0.26s -bash
# during the migration you can watch the status (both nova list and openstack server list --long --fit-width show it)
[root@node1 ~]# nova list
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
| 1bbc59bf-6827-439e-86d3-21eda28d8b43 | cirros-cephrbd-instance1 | MIGRATING | migrating | Running | vlan99=172.16.99.102 |
| 8611811b-b4e9-4fdc-88b9-1d63427a664d | cirros-cephvolumes-instance1 | ACTIVE | - | Running | vlan99=172.16.99.101 |
+--------------------------------------+------------------------------+-----------+------------+-------------+----------------------+
# after the migration completes, check which node the instance is on;
# or check "hypervisor_hostname" with "nova show 1bbc59bf-6827-439e-86d3-21eda28d8b43";
# "openstack server list --long --fit-width" shows the same information
[root@node1 ~]# nova hypervisor-servers node4
[root@node1 ~]# nova hypervisor-servers node3
Extension: batch-create instances and inspect them on ceph
Convert the existing qcow2 centos7.5 image to raw and compress it (note: images converted from qcow2 to raw repeatedly failed to run on this openstack platform; building a raw image directly is more reliable)
[root@node1 tmp]# virt-sparsify -x centos7.5.qcow2 --convert raw centos7.5.raw
[root@node1 tmp]# ls -lh
total 2.4G
-rw-r----- 1 qemu qemu 1.3G Feb 4 21:24 centos7.5.qcow2
-rw-r--r-- 1 root root 20G Feb 6 13:34 centos7.5.raw
[root@node1 tmp]# qemu-img info centos7.5.raw
image: centos7.5.raw
file format: raw
virtual size: 20G (21474836480 bytes)
disk size: 1.1G
Upload it to glance
[root@node1 tmp]# openstack image create "centos7.5" --file centos7.5.raw --disk-format raw --container-format bare --public
+------------------+----------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+----------------------------------------------------------------------------------------------------------+
| checksum | 8862d942f2237a9478023fe48232d420 |
| container_format | bare |
| created_at | 2020-02-06T05:52:40Z |
| disk_format | raw |
| file | /v2/images/bf4bff9e-51de-4787-a76e-3a4637e7fe75/file |
| id | bf4bff9e-51de-4787-a76e-3a4637e7fe75 |
| min_disk | 0 |
| min_ram | 0 |
| name | centos7.5 |
| owner | 75aed7016c86445198356e78dddde4ba |
| properties | direct_url='rbd://272905d2-fd66-4ef6-a772-9cd73a274683/images/bf4bff9e-51de-4787-a76e-3a4637e7fe75/snap' |
| protected | False |
| schema | /v2/schemas/image |
| size | 21474836480 |
| status | active |
| tags | |
| updated_at | 2020-02-06T06:39:06Z |
| virtual_size | None |
| visibility | public |
+------------------+----------------------------------------------------------------------------------------------------------+
Check the image in ceph
[root@node1 ~]# rbd ls images
bf4bff9e-51de-4787-a76e-3a4637e7fe75
[root@node1 ~]# rbd info --pool images bf4bff9e-51de-4787-a76e-3a4637e7fe75
rbd image 'bf4bff9e-51de-4787-a76e-3a4637e7fe75':
size 20GiB in 2560 objects
order 23 (8MiB objects)
block_name_prefix: rbd_data.d128d2b1e13e5
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Thu Feb 6 21:52:08 2020
Create the instances with a script
[root@node1 ~]# cat nova-create.sh
for i in `seq 1 9`;do
IP1=172.16.99.14${i}
IP2=192.168.9.22${i}
openstack server create --flavor 1c1g --availability-zone nova --image 'centos7.5' --nic net-id=5ac5c948-909f-47ff-beba-a2ffaf917c5f,v4-fixed-ip=${IP1} --nic net-id=98f5d807-80e0-48a3-9f40-97eb6ed15f33,v4-fixed-ip=${IP2} --security-group 5bb5f2b1-9210-470f-a4a7-2715220b2920 openstack-vm${i}
done
[root@node1 ~]# bash nova-create.sh
[root@node1 ~]# openstack server list
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
| 9bca8f74-9a44-4697-b0e0-3631fc063e99 | openstack-vm9 | ACTIVE | vlan99=172.16.99.149; vlan809=192.168.9.229 | centos7.5 | 1c1g |
| 9e3a5d42-8347-48d4-a628-7c8caaf20ccd | openstack-vm8 | ACTIVE | vlan99=172.16.99.148; vlan809=192.168.9.228 | centos7.5 | 1c1g |
| 8b512e2b-c5d3-4ce9-9fec-6868488cfbfb | openstack-vm7 | ACTIVE | vlan99=172.16.99.147; vlan809=192.168.9.227 | centos7.5 | 1c1g |
| b5a5b16c-f565-433f-846b-cd94b9018995 | openstack-vm6 | ACTIVE | vlan99=172.16.99.146; vlan809=192.168.9.226 | centos7.5 | 1c1g |
| 271584ec-3dea-4351-8cf3-97fcd416e2c0 | openstack-vm5 | ACTIVE | vlan99=172.16.99.145; vlan809=192.168.9.225 | centos7.5 | 1c1g |
| fc6ae1f4-dc19-4d5b-b044-4564b40a72e3 | openstack-vm4 | ACTIVE | vlan99=172.16.99.144; vlan809=192.168.9.224 | centos7.5 | 1c1g |
| eabc346b-1354-4d30-913a-3983948e29d8 | openstack-vm3 | ACTIVE | vlan99=172.16.99.143; vlan809=192.168.9.223 | centos7.5 | 1c1g |
| ec84be63-687f-4dcf-9ce2-b87a923640ab | openstack-vm2 | ACTIVE | vlan99=172.16.99.142; vlan809=192.168.9.222 | centos7.5 | 1c1g |
| 5c459d9c-fb56-422b-a074-a142ba2d091d | openstack-vm1 | ACTIVE | vlan99=172.16.99.141; vlan809=192.168.9.221 | centos7.5 | 1c1g |
+--------------------------------------+---------------+--------+---------------------------------------------+-----------+--------+
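If the test instances need to be removed later, a matching batch delete mirroring the creation script (a sketch, not part of the original run):
[root@node1 ~]# for i in `seq 1 9`;do openstack server delete openstack-vm${i};done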
Check these instances in the ceph vms pool
[root@node1 ~]# rbd ls vms
271584ec-3dea-4351-8cf3-97fcd416e2c0_disk
5c459d9c-fb56-422b-a074-a142ba2d091d_disk
8b512e2b-c5d3-4ce9-9fec-6868488cfbfb_disk
9bca8f74-9a44-4697-b0e0-3631fc063e99_disk
9e3a5d42-8347-48d4-a628-7c8caaf20ccd_disk
b5a5b16c-f565-433f-846b-cd94b9018995_disk
eabc346b-1354-4d30-913a-3983948e29d8_disk
ec84be63-687f-4dcf-9ce2-b87a923640ab_disk
fc6ae1f4-dc19-4d5b-b044-4564b40a72e3_disk
[root@node1 ~]# rbd info --pool vms 5c459d9c-fb56-422b-a074-a142ba2d091d_disk
rbd image '5c459d9c-fb56-422b-a074-a142ba2d091d_disk':
size 20GiB in 2560 objects
order 23 (8MiB objects)
block_name_prefix: rbd_data.d1ffc5b6d642c
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Thu Feb 6 23:10:32 2020
parent: images/bf4bff9e-51de-4787-a76e-3a4637e7fe75@snap
overlap: 20GiB
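The parent field shows that each VM disk is a copy-on-write clone of the glance image snapshot; all clones of that snapshot can be listed with:
[root@node1 ~]# rbd children images/bf4bff9e-51de-4787-a76e-3a4637e7fe75@snap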
[root@node1 ~]# ceph df
RAW STORAGE:
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 1.5 TiB 1.4 TiB 47 GiB 66 GiB 4.43
TOTAL 1.5 TiB 1.4 TiB 47 GiB 66 GiB 4.43
POOLS:
POOL ID STORED OBJECTS USED %USED MAX AVAIL
nova-metadata 6 13 MiB 25 38 MiB 0 461 GiB
nova-data 7 450 B 6 1.1 MiB 0 443 GiB
volumes 8 1.8 KiB 8 512 KiB 0 818 GiB
images 9 23 GiB 2.75k 43 GiB 3.10 703 GiB
vms 11 1.7 GiB 233 3.2 GiB 0.24 710 GiB
Troubleshooting:
Fixing the ceph health warning
health: HEALTH_WARN
clock skew detected on mon.ceph-host-02, mon.ceph-host-03
This is caused by unsynchronized clocks between the monitor nodes; synchronize time across the cluster:
# ansible ceph -a 'yum install ntpdate -y'
# ansible ceph -a 'systemctl stop ntpdate'
# ansible ceph -m shell -a 'echo "0 2 * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1" >>/etc/crontab'
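If a small residual skew persists after time is synchronized, the monitors' tolerance can be raised in ceph.conf (an assumption; the default mon_clock_drift_allowed is 0.05s):
[mon]
mon clock drift allowed = 0.1
# then restart the monitors, e.g.:
# ansible ceph -m shell -a 'systemctl restart ceph-mon.target'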