020 Using Ceph as Backend Storage for OpenStack
1. Using Ceph as the Glance Backend
1.1 Create a pool for storing images
[root@serverc ~]# ceph osd pool create images 128 128
pool 'images' created
[root@serverc ~]# ceph osd pool application enable images rbd
enabled application 'rbd' on pool 'images'
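Before moving on, it is worth confirming that the pool parameters and the application tag took effect. These two read-only checks were not part of the original run, but both commands exist in Luminous and later:
[root@serverc ~]# ceph osd pool get images pg_num
[root@serverc ~]# ceph osd pool application get images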
1.2 Create the client.glance account and grant permissions
[root@serverc ~]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
[root@serverc ~]# ll /etc/ceph/ceph.client.glance.keyring
-rw-r--r-- 1 root root 64 Mar 31 10:33 /etc/ceph/ceph.client.glance.keyring
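To double-check what was actually granted, ceph auth get prints the entity's key and caps (a verification step added here, not from the original transcript):
[root@serverc ~]# ceph auth get client.glance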
1.3 Install the Ceph client on the Glance server
[root@serverb ~]# yum -y install ceph-common
Copy ceph.conf and ceph.client.glance.keyring from the Ceph server to the Glance server:
[root@serverc ceph]# scp -r /etc/ceph/ceph.conf /etc/ceph/ceph.client.glance.keyring serverb:/etc/ceph/
1.4 Adjust the keyring ownership on the client
[root@serverb ~]# chown glance.glance /etc/ceph/ceph.client.glance.keyring
1.5 Modify the configuration files
Edit /etc/ceph/ceph.conf on the client:
[root@serverb ~]# vim /etc/ceph/ceph.conf
[client.glance]
keyring = /etc/ceph/ceph.client.glance.keyring
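With this section in place, any Ceph command run as client.glance on serverb picks up the keyring automatically. A quick connectivity test (hypothetical check; the 'mon allow r' cap is enough for it to return the cluster health):
[root@serverb ~]# ceph health --id glance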
Edit /etc/glance/glance-api.conf:
[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size =
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
[root@serverb ~]# grep -Ev "^$|^[#;]" /etc/glance/glance-api.conf
[DEFAULT]
bind_host = 0.0.0.0
bind_port =
workers =
image_cache_dir = /var/lib/glance/image-cache
registry_host = 0.0.0.0
debug = False
log_file = /var/log/glance/api.log
log_dir = /var/log/glance
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:27c082e7c4a9413c@172.25.250.11/glance
[glance_store]
stores = rbd
default_store = rbd
filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size =
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
os_region_name=RegionOne
[image_format]
[keystone_authtoken]
auth_uri = http://172.25.250.11:5000/v2.0
auth_type = password
project_name=services
username=glance
password=99b29d9142514f0f
auth_url=http://172.25.250.11:35357
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
policy_file = /etc/glance/policy.json
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
Restart glance-api:
[root@serverb ~]# systemctl restart openstack-glance-api
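Before uploading an image, you can sanity-check that the glance user reaches the images pool from serverb (a manual test assuming the keyring and ceph.conf copied above; the pool should list as empty at this point):
[root@serverb ~]# rbd ls images --id glance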
1.6 Verification
Download a test image:
[root@foundation ~]# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
[root@foundation ~]# scp cirros-0.4.0-x86_64-disk.img root@serverb:/tmp/
[root@serverb ~]# cd /tmp/
[root@serverb tmp]# ll
-rw-r--r-- 1 root root 12716032 Mar 31 10:39 cirros-0.4.0-x86_64-disk.img
(other files in /tmp omitted)
[root@serverb tmp]# glance image-list
You must provide a username via either --os-username or env[OS_USERNAME]
[root@serverb ~]# source keystonerc_admin
[root@serverb ~(keystone_admin)]# glance image-list
+----+------+
| ID | Name |
+----+------+
+----+------+
[root@serverb ~(keystone_admin)]# glance image-create --name cirros --file /tmp/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --progress
[=============================>] %
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 443b7623e27ecf03dc9e01ee93f67afe |
| container_format | bare |
| created_at | --30T10::44Z |
| disk_format | qcow2 |
| id | 79cfc319-f60a-45d4-834f-b70dc20c7975 |
| min_disk | |
| min_ram | |
| name | cirros |
| owner | 79cf145d371e48ef96f608cbf85d1788 |
| protected | False |
| size | |
| status | active |
| tags | [] |
| updated_at | --30T10::47Z |
| virtual_size | None |
| visibility | private |
+------------------+--------------------------------------+
[root@serverb ~(keystone_admin)]# glance image-create --name cirros --file /tmp/cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --progress
[=============================>] %
+------------------+--------------------------------------+
| Property | Value |
+------------------+--------------------------------------+
| checksum | 443b7623e27ecf03dc9e01ee93f67afe |
| container_format | bare |
| created_at | --30T01::49Z |
| disk_format | qcow2 |
| id | ab67abe6-7d65-407f-88e9-7b46d873b477 |
| min_disk | |
| min_ram | |
| name | cirros |
| owner | 79cf145d371e48ef96f608cbf85d1788 |
| protected | False |
| size | |
| status | active |
| tags | [] |
| updated_at | --30T01::49Z |
| virtual_size | None |
| visibility | private |
+------------------+--------------------------------------+
[root@serverb ~(keystone_admin)]# glance image-list
+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
| ab67abe6-7d65-407f-88e9-7b46d873b477 | cirros |
+--------------------------------------+--------+
1.7 Delete an image
[root@serverb tmp(keystone_admin)]# glance image-delete ab67abe6-7d65-407f-88e9-7b46d873b477
[root@serverb ~(keystone_admin)]# glance image-list
+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| 79cfc319-f60a-45d4-834f-b70dc20c7975 | cirros |
+--------------------------------------+--------+
[root@serverc ~]# rados -p images ls
rbd_object_map.105f76fe073c.0000000000000004
rbd_directory
rbd_data.105f76fe073c.0000000000000001
rbd_info
rbd_id.79cfc319-f60a-45d4-834f-b70dc20c7975
rbd_object_map.105f76fe073c
rbd_data.105f76fe073c.0000000000000000
rbd_header.105f76fe073c
[root@serverc ~]# rbd ls images
79cfc319-f60a-45d4-834f-b70dc20c7975
[root@serverc ~]# rbd info images/79cfc319-f60a-45d4-834f-b70dc20c7975
rbd image '79cfc319-f60a-45d4-834f-b70dc20c7975':
size kB in objects
order ( kB objects)
block_name_prefix: rbd_data.105f76fe073c
format:
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Sun Mar ::
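The block_name_prefix shown above is what ties the rados objects back to the image: every data object of this RBD image is named rbd_data.105f76fe073c.<object number>. Counting them is an easy way to cross-check the two listings (an illustrative command, not from the original run):
[root@serverc ~]# rados -p images ls | grep rbd_data.105f76fe073c | wc -l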
[root@serverb ~(keystone_admin)]# nova flavor-list
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| | m1.tiny | | | | | | 1.0 | True |
| | m1.small | | | | | | 1.0 | True |
| | m1.medium | | | | | | 1.0 | True |
| | m1.large | | | | | | 1.0 | True |
| | m1.xlarge | | | | | | 1.0 | True |
+----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
1.8 Boot a Nova instance from the uploaded image
# nova boot --flavor 1 --image <image id> <instance name>
[root@serverb ~(keystone_admin)]# nova boot --flavor 1 --image 3d80ba00-b4c7-4f3c-98b8-17d9fd140216 vm1
+--------------------------------------+-----------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hostname | vm1 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-7ygb36rz |
| OS-EXT-SRV-ATTR:root_device_name | - |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | 3j2dpZjCXZn8 |
| config_drive | |
| created | --29T12::30Z |
| description | - |
| flavor | m1.tiny () |
| hostId | |
| host_status | |
| id | dec39eb4-75f5-47eb-b335-1e2b1833253d |
| image | cirros (3d80ba00-b4c7-4f3c-98b8-17d9fd140216) |
| key_name | - |
| locked | False |
| metadata | {} |
| name | vm1 |
| os-extended-volumes:volumes_attached | [] |
| progress | |
| security_groups | default |
| status | BUILD |
| tags | [] |
| tenant_id | 79cf145d371e48ef96f608cbf85d1788 |
| updated | --29T12::30Z |
| user_id | 8e0be34493e04722ba03ab30fbbf3bf8 |
+--------------------------------------+-----------------------------------------------+
[root@serverb ~(keystone_admin)]# nova list
+--------------------------------------+------+--------+------------+-------------+----------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+------+--------+------------+-------------+----------------------------+
| dec39eb4-75f5-47eb-b335-1e2b1833253d | vm1 | ERROR | - | NOSTATE | novanetwork=192.168.32.255 |
+--------------------------------------+------+--------+------------+-------------+----------------------------+
The instance ends up in ERROR state; some OpenStack configuration still needs to be completed, and this may be updated in a follow-up.
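When an instance lands in ERROR, two generic first steps are to ask Nova for the recorded fault and to scan the compute log (standard troubleshooting commands, not captured in this session):
[root@serverb ~(keystone_admin)]# nova show vm1 | grep fault
[root@serverb ~]# grep -i error /var/log/nova/nova-compute.log | tail -n 20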
2. Using Ceph as the Cinder Backend
[root@serverb ~(keystone_admin)]# cinder list
+----+--------+------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+----+--------+------+------+-------------+----------+-------------+
+----+--------+------+------+-------------+----------+-------------+
2.1 Create an RBD pool for Cinder as well
[root@serverc ~]# ceph osd pool create volumes 64 64
pool 'volumes' created
[root@serverc ~]# ceph osd pool application enable volumes rbd
enabled application 'rbd' on pool 'volumes'
2.2 Grant permissions to the client.cinder user
[root@serverc ~]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=volumes' -o /etc/ceph/ceph.client.cinder.keyring
[root@serverc ~]# ceph auth get-key client.cinder -o /etc/ceph/temp.client.cinder.key
[root@serverc ~]# cat /etc/ceph/temp.client.cinder.key
AQA5KaBcszk/JxAAPdymqbMzqrfhZ+GyqZgUvg==
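Writing the key to a temporary file is optional; ceph auth print-key emits the same base64 key on stdout, which is convenient for feeding straight into virsh secret-set-value later (an equivalent alternative, shown for reference):
[root@serverc ~]# ceph auth print-key client.cinder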
[root@serverc ~]# scp -r /etc/ceph/ceph.client.glance.keyring /etc/ceph/ceph.client.cinder.keyring serverb:/etc/ceph/
2.3 Install the Ceph client on the Cinder server
[root@serverb ~]# yum -y install ceph-common
[root@serverb tmp]# chown cinder.cinder -R /etc/ceph/ceph.client.cinder.keyring
2.4 Edit /etc/ceph/ceph.conf on the client
[root@serverb tmp]# vim /etc/ceph/ceph.conf
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
2.5 Create a libvirt secret
[root@serverb tmp(keystone_admin)]# uuidgen
ade72e47-ce6f-4f44-a97d-d7dff6aef99c
[root@serverb tmp(keystone_admin)]# vim /etc/ceph/secret.xml
<secret ephemeral="no" private="no">
<uuid>ade72e47-ce6f-4f44-a97d-d7dff6aef99c</uuid>
<usage type="ceph">
<name>client.cinder secret</name>
</usage>
</secret>
[root@serverb tmp(keystone_admin)]# virsh secret-define --file /etc/ceph/secret.xml
Secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c created
[root@serverb tmp(keystone_admin)]# virsh secret-list
UUID Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c ceph client.cinder secret
2.6 Load the key into the secret
[root@serverb tmp(keystone_admin)]# virsh secret-set-value --secret ade72e47-ce6f-4f44-a97d-d7dff6aef99c --base64 $(cat /etc/ceph/temp.client.cinder.key)
Secret value set
[root@serverb tmp(keystone_admin)]# virsh secret-list
UUID Usage
--------------------------------------------------------------------------------
ade72e47-ce6f-4f44-a97d-d7dff6aef99c ceph client.cinder secret
[root@serverb tmp(keystone_admin)]# ceph -s --id cinder
  cluster:
    id:     70ec7a0b-7b4d-4c4d--3eb5ce3e8e50
    health: HEALTH_OK
  services:
    mon: daemons, quorum serverc,serverd,servere
    mgr: servere(active), standbys: serverc, serverd
    osd: osds: up, in
  data:
    pools:   pools, pgs
    objects: objects, kB
    usage:   MB used, GB / GB avail
    pgs:     active+clean
2.7 Edit /etc/cinder/cinder.conf
[root@serverb tmp(keystone_admin)]# vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = rbd2
default_volume_type = rbd2
glance_api_version = 2
[rbd2]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_secret_uuid = ade72e47-ce6f-4f44-a97d-d7dff6aef99c
rbd_max_clone_depth =
rbd_store_chunk_size =
rados_connect_timeout = -1
# Specify volume_backend_name so a volume type can be pinned to this backend (optional)
#volume_backend_name = rbd2
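Note that the name in enabled_backends must match the section header, and volume_backend_name is what the volume type's extra spec (set in 2.8) matches against. If a second backend were ever added, it would follow the same pattern; a minimal sketch with a hypothetical rbd_fast backend on a hypothetical volumes-ssd pool:
[DEFAULT]
enabled_backends = rbd2,rbd_fast
[rbd_fast]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes-ssd
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_secret_uuid = ade72e47-ce6f-4f44-a97d-d7dff6aef99c
volume_backend_name = rbd_fast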
2.8 Create the dedicated Cinder volume type
[root@serverb tmp(keystone_admin)]# cinder type-create rbd2
+--------------------------------------+------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| c92590e9-33f8---945dc3eb4548 | rbd2 | - | True |
+--------------------------------------+------+-------------+-----------+
[root@serverb tmp(keystone_admin)]# cinder type-key rbd2 set volume_backend_name=rbd2
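The type-to-backend mapping can be confirmed with the extra-specs listing (a quick check, not from the original run):
[root@serverb tmp(keystone_admin)]# cinder extra-specs-list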
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-volume
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-api
[root@serverb tmp(keystone_admin)]# systemctl restart openstack-cinder-scheduler
2.9 Verification
[root@serverb tmp(keystone_admin)]# cinder create --name new-volume --display-name 'ceph storage' 2 --volume_type rbd2
+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | --31T03::34.000000 |
| description | None |
| encrypted | False |
| id | 5aa151ad-978c-40b3-bca9-ead7c34358ff |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | ceph storage |
| os-vol-host-attr:host | None |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | 79cf145d371e48ef96f608cbf85d1788 |
| replication_status | disabled |
| size | |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| updated_at | None |
| user_id | 8e0be34493e04722ba03ab30fbbf3bf8 |
| volume_type | rbd2 |
+--------------------------------+--------------------------------------+
[root@serverb tmp(keystone_admin)]# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID | Status | Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 5aa151ad-978c-40b3-bca9-ead7c34358ff | available | ceph storage | | rbd2 | false | |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
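On the Ceph side, the Cinder RBD driver stores each volume as an RBD image named volume-<volume id>, so the new volume can be cross-checked directly in the pool (assuming the driver's default naming):
[root@serverc ~]# rbd ls volumes
volume-5aa151ad-978c-40b3-bca9-ead7c34358ff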
3. Authenticating the RADOS Gateway with Keystone
3.1 Configure the RADOS Gateway server
[root@serverc ~]# yum install -y ceph-radosgw
[root@serverc ~]# ceph auth get-or-create client.rgw.serverc mon 'allow rwx' osd 'allow rwx' -o /etc/ceph/ceph.client.rgw.serverc.keyring
[root@serverc ~]# vim /etc/ceph/ceph.conf
[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc
[root@serverc ~]# ps -ef|grep rados
root : ? :: /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph
3.2 Create the service and endpoints in Keystone
[root@serverb tmp(keystone_admin)]# openstack service create --description "Swift Service" --name swift object-store
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Swift Service |
| enabled | True |
| id | 1dd0d40cd61d4bed870cc3c302a001da |
| name | swift |
| type | object-store |
+-------------+----------------------------------+
[root@serverb tmp(keystone_admin)]# openstack endpoint create --region RegionOne --publicurl "http://serverc.lab.example.com/swift/v1" --adminurl "http://serverc.lab.example.com/swift/v1" --internalurl "http://serverc.lab.example.com/swift/v1" swift
+--------------+-----------------------------------------+
| Field | Value |
+--------------+-----------------------------------------+
| adminurl | http://serverc.lab.example.com/swift/v1 |
| id | 47f906c29a904571a44dcd99ea27561c |
| internalurl | http://serverc.lab.example.com/swift/v1 |
| publicurl | http://serverc.lab.example.com/swift/v1 |
| region | RegionOne |
| service_id | 1dd0d40cd61d4bed870cc3c302a001da |
| service_name | swift |
| service_type | object-store |
+--------------+-----------------------------------------+
[root@serverb tmp(keystone_admin)]# openstack service list
+----------------------------------+----------+--------------+
| ID | Name | Type |
+----------------------------------+----------+--------------+
| 1dd0d40cd61d4bed870cc3c302a001da | swift | object-store |
| 26a3d56178cd4da2bca93e775ce4efac | cinderv3 | volumev3 |
| 834ee6fe73b2425fb5bb667ccdfdf6a7 | cinderv2 | volumev2 |
| 9581f6be4b4e4112bdb8d1cb8ef2794b | keystone | identity |
| a43b4be139364c4fbf9555e12eeabfed | glance | image |
| a63dad7778b744bfbc263dd73caf0fdb | cinder | volume |
| f3f2b987cdc14d7996bacbd13d3301e1 | nova | compute |
+----------------------------------+----------+--------------+
[root@serverb tmp(keystone_admin)]# openstack service show swift
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Swift Service |
| enabled | True |
| id | 1dd0d40cd61d4bed870cc3c302a001da |
| name | swift |
| type | object-store |
+-------------+----------------------------------+
[root@serverb tmp(keystone_admin)]# openstack endpoint show swift
+--------------+-----------------------------------------+
| Field | Value |
+--------------+-----------------------------------------+
| adminurl | http://serverc.lab.example.com/swift/v1 |
| enabled | True |
| id | 47f906c29a904571a44dcd99ea27561c |
| internalurl | http://serverc.lab.example.com/swift/v1 |
| publicurl | http://serverc.lab.example.com/swift/v1 |
| region | RegionOne |
| service_id | 1dd0d40cd61d4bed870cc3c302a001da |
| service_name | swift |
| service_type | object-store |
+--------------+-----------------------------------------+
3.3 Obtain the Keystone admin token
[root@serverb tmp(keystone_admin)]# cat /etc/keystone/keystone.conf |grep admin_token
# value is ignored and the `admin_token` middleware is effectively disabled.
# However, to completely disable `admin_token` in production (highly
# `AdminTokenAuthMiddleware` (the `admin_token_auth` filter) from your paste
#admin_token = <None>
admin_token = fb032ccf285a432b81c6fe347be8a07d
3.4 Edit /etc/ceph/ceph.conf
[root@serverc ~]# vim /etc/ceph/ceph.conf
[client.rgw.serverc]
host = serverc
keyring = /etc/ceph/ceph.client.rgw.serverc.keyring
rgw_frontends = civetweb port= num_threads=
log file = /var/log/ceph/$cluster.$name.log
rgw_dns_name = serverc.lab.example.com
rgw_keystone_url = http://serverb.lab.example.com:5000
rgw_keystone_admin_token = fb032ccf285a432b81c6fe347be8a07d
rgw_keystone_accepted_roles = admin member swiftoperator
rgw_keystone_token_cache_size =
rgw_keystone_revocation_interval =
rgw_keystone_verify_ssl = false
[root@serverc ~]# systemctl restart ceph-radosgw@rgw.serverc
[root@serverc ~]# ps -ef|grep rados
ceph : ? :: /usr/bin/radosgw -f --cluster ceph --name client.rgw.serverc --setuser ceph --setgroup ceph
[root@serverb tmp(keystone_admin)]# ps -ef |grep keystone
keystone : ? :: keystone-admin -DFOREGROUND
keystone : ? :: keystone-admin -DFOREGROUND
keystone : ? :: keystone-main -DFOREGROUND
keystone : ? :: keystone-main -DFOREGROUND
[root@serverb tmp(keystone_admin)]# netstat -ntlp |grep 987
tcp6 ::: :::* LISTEN /httpd
tcp6 ::: :::* LISTEN /httpd
tcp6 ::: :::* LISTEN /httpd
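At this point the gateway should answer anonymous S3 requests; a plain HTTP probe (a hypothetical check, assuming civetweb listens on port 80 as the endpoint URLs in 3.2 imply) returns an empty ListAllMyBucketsResult XML document:
[root@serverb ~]# curl http://serverc.lab.example.com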
3.5 Verify from the client
[root@serverb tmp(keystone_admin)]# swift list
[root@serverb tmp(keystone_admin)]# swift post testbucket
[root@serverb tmp(keystone_admin)]# swift list
testbucket
[root@serverc ~]# ceph osd pool ls
images
volumes
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
default.rgw.buckets.index
[root@serverc ~]# rados -p default.rgw.buckets.index ls
.dir.ce5b2073-728f-42d5-8fac-b2e0aa2a41a3.4333.1
[root@serverb tmp(keystone_admin)]# swift upload testbucket /etc/ceph/secret.xml
etc/ceph/secret.xml
[root@serverc ~]# rados -p default.rgw.buckets.data ls
ce5b2073-728f-42d5-8fac-b2e0aa2a41a3..1_etc/ceph/secret.xml
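To close the loop, the object can be pulled back through the gateway and printed to stdout for comparison with the original file (a round-trip check; -o - directs the download to stdout):
[root@serverb tmp(keystone_admin)]# swift download testbucket etc/ceph/secret.xml -o -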
Author's note: the content of this post comes mainly from Yan Wei of Yutian Education; I carried out and verified all of the operations myself. If you want to repost it, please contact Yutian Education (http://www.yutianedu.com/) or Mr. Yan (https://www.cnblogs.com/breezey/) for permission first. Thanks!