We know the cluster topology because of these 5 cluster maps.
======================================
We know the cluster topology because of these 5 cluster maps: the monitor map, the OSD map, the PG map, the CRUSH map, and the MDS map.
Related commands
Tab completion is available, just like on a switch CLI. (A JSON-output variant of these dumps, useful for scripting, is sketched right after the list.)
ceph mon dump
ceph osd dump
ceph fs dump
ceph pg dump
ceph osd getcrushmap -o crush     # the CRUSH map is dumped as a binary blob and must be decompiled to get text
crushtool -d crush -o crush1
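Every one of these dump commands also accepts a machine-readable output format, which is the easier path for scripting. A minimal sketch, assuming a standard ceph CLI where -f json-pretty (equivalently --format json-pretty) is available:

# the same cluster maps, emitted as JSON for scripting
ceph mon dump -f json-pretty
ceph osd dump -f json-pretty
ceph fs dump -f json-pretty
ceph pg dump -f json-pretty > pg_dump.json    # pg dump is large, so redirect it to a file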
======================================
[root@ali- dd]# ceph mon dump
dumped monmap epoch
epoch
fsid 69e6081b-075f-4f39-8cf3-f1e5bd68908b
last_changed -- ::31.228140
created -- ::21.704124
: 192.168.3.51:/ mon.ali-
: 192.168.3.52:/ mon.ali-
: 192.168.3.53:/ mon.ali-
======================================
[root@ali- dd]# ceph fs dump
dumped fsmap epoch
e1
enable_multiple, ever_enabled_multiple: ,
compat: compat={},rocompat={},incompat={=base v0.,=client writeable ranges,=default file layouts on dirs,=dir inode in separate object,=mds uses versioned encoding,=dirfrag is stored in omap,=file layout v2}
legacy client fscid: -
No filesystems configured
======================================
[root@ali- dd]# ceph pg dump
dumped all
version
stamp -- ::24.077612
last_osdmap_epoch
last_pg_scan
full_ratio 0.9
nearfull_ratio 0.8
[root@ceph1 ~]# ceph pg ls
PG OBJECTS DEGRADED MISPLACED UNFOUND BYTES LOG STATE STATE_STAMP VERSION REPORTED UP ACTING SCRUB_STAMP DEEP_SCRUB_STAMP
1.0 active+clean -- ::54.430131 '2 57:95 [1,2,0]p1 [1,2,0]p1 2019-03-28 02:42:54.430020 2019-03-28 02:42:54.430020
1.1 active+clean -- ::33.846731 '0 57:78 [2,0,1]p2 [2,0,1]p2 2019-03-27 20:42:33.846600 2019-03-27 20:42:33.846600
1.2 active+clean -- ::31.853254 '0 57:92 [1,0,2]p1 [1,0,2]p1 2019-03-27 20:02:31.853127 2019-03-21 18:53:07.286885
1.3 active+clean -- ::29.499574 '0 57:94 [0,1,2]p0 [0,1,2]p0 2019-03-28 01:04:29.499476 2019-03-21 18:53:07.286885
1.4 active+clean -- ::42.694788 '0 57:77 [2,1,0]p2 [2,1,0]p2 2019-03-28 10:17:42.694658 2019-03-21 18:53:07.286885
1.5 active+clean -- ::49.922515 '0 57:78 [2,0,1]p2 [2,0,1]p2 2019-03-28 14:33:49.922414 2019-03-21 18:53:07.286885
1.6 active+clean -- ::08.897114 '0 57:78 [2,1,0]p2 [2,1,0]p2 2019-03-28 08:33:08.897044 2019-03-25 19:51:32.716535
1.7 active+clean -- ::16.417698 '0 57:92 [1,2,0]p1 [1,2,0]p1 2019-03-27 21:37:16.417553 2019-03-22 23:05:53.863908
2.0 active+clean -- ::09.127196 '1 57:155 [1,2,0]p1 [1,2,0]p1 2019-03-27 15:07:09.127107 2019-03-22 15:05:32.211389
2.1 active+clean -- ::41.958378 '0 57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 20:55:41.958328 2019-03-27 20:55:41.958328
2.2 active+clean -- ::45.117140 '0 57:87 [1,0,2]p1 [1,0,2]p1 2019-03-28 03:09:45.117036 2019-03-28 03:09:45.117036
2.3 active+clean -- ::17.944907 '0 57:87 [1,0,2]p1 [1,0,2]p1 2019-03-27 08:54:17.944792 2019-03-26 05:44:21.586541
2.4 active+clean -- ::52.040458 '0 57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 23:42:52.040353 2019-03-22 15:05:32.211389
2.5 active+clean -- ::15.908085 '0 57:73 [2,0,1]p2 [2,0,1]p2 2019-03-27 14:26:15.908022 2019-03-22 15:05:32.211389
2.6 active+clean -- ::22.282027 '2 57:161 [0,2,1]p0 [0,2,1]p0 2019-03-28 15:00:22.281923 2019-03-26 05:39:41.395132
2.7 active+clean -- ::39.415262 '4 57:253 [1,2,0]p1 [1,2,0]p1 2019-03-27 17:09:39.415167 2019-03-27 17:09:39.415167
[root@ceph1 rbdpool]# ceph pg map 8.13
osdmap e55 pg 8.13 (8.13) -> up [,,] acting [,,]
A PG id is made up of {pool-num}.{pg-id}.
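To find out which PG an individual object would land on (and therefore which OSDs serve it), ceph osd map computes the placement without writing anything. A small sketch; the pool name rbdpool and the object name testobj are illustrative placeholders:

# compute the PG and acting OSD set for an object name
ceph osd map rbdpool testobj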
ceph osd lspools
[root@ceph1 rbdpool]# ceph pg stat
pgs: active+clean; GiB data, GiB used, 8.4 GiB / GiB avail
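PGs can also be listed per pool or per OSD, which helps when narrowing a problem down to one device. A short sketch using the ls-by-* subcommands (the pool name and OSD id are examples, taken from this cluster's prompts):

# PGs belonging to one pool
ceph pg ls-by-pool rbdpool
# PGs whose acting set includes OSD 0
ceph pg ls-by-osd 0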
[root@client mnt]# rm -rf a*
Only after the delete operation above do the PGs below start to be cleaned up.
[root@ceph1 rbdpool]# ceph pg stat
pgs: active+clean; 2.5 MiB data, 3.5 GiB used, GiB / GiB avail; 8.7 KiB/s rd, B/s wr, op/s
======================================
[root@ali- dd]# ceph osd getcrushmap -o crush
[root@ali- dd]# file crush
crush: MS Windows icon resource - icons, -colors
[root@ali- dd]# crushtool -d crush -o crush1
[root@ali- dd]# file crush1
crush1: ASCII text
[root@ali- dd]# cat crush1
# begin crush map
tunable choose_local_tries
tunable choose_local_fallback_tries
tunable choose_total_tries
......
rule pool-d83c6154956b44aea7639c7bd4c45c65-rule {
id
type replicated
min_size
max_size
step take pool-d83c6154956b44aea7639c7bd4c45c65-root
step chooseleaf firstn type rack
step emit
}
# end crush map
[root@ali- dd]#
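The decompiled text can be edited and pushed back into the cluster. A minimal sketch of the full round trip, using standard crushtool and ceph subcommands (file names follow the ones used above):

ceph osd getcrushmap -o crush        # export the binary CRUSH map
crushtool -d crush -o crush1         # decompile it into editable text
vi crush1                            # adjust buckets, weights, rules ...
crushtool -c crush1 -o crush.new     # recompile the text back to binary
ceph osd setcrushmap -i crush.new    # inject the new map; data may move if placement changed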
======================================
[root@ali- dd]# ceph osd dump
epoch
fsid 69e6081b-075f-4f39-8cf3-f1e5bd68908b
created -- ::22.409031
modified -- ::38.522821
flags nodeep-scrub,sortbitwise,recovery_deletes,purged_snapdirs
crush_version
full_ratio 0.9
backfillfull_ratio 0.85
nearfull_ratio 0.8
omap_full_ratio 0.9
omap_backfillfull_ratio 0.85
omap_nearfull_ratio 0.8
require_min_compat_client luminous
min_compat_client luminous
require_osd_release luminous
pool 'pool-d83c6154956b44aea7639c7bd4c45c65' replicated size min_size crush_rule object_hash rjenkins pg_num pgp_num last_change flags hashpspool stripe_width async_recovery_max_updates osd_backfillfull_ratio 0.85 osd_full_ratio 0.9 osd_nearfull_ratio 0.8 osd_omap_backfillfull_ratio 0.85 osd_omap_nearfull_ratio 0.8
removed_snaps [~]
max_osd
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 54e32850-b1ef-44e1-8df9-d3c93bfe4807
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 17af8207-2a25-405b-b87d-1c6d7806cc8d
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 06cf6578-e516-4e4a-a494-10423b8999cd
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists bc31e4ab-a135--81b3-e92969921ba7
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 62edd341-50b8-4cca-852f-852a51f96760
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 00d0cd89-2e74--b4b4-6deaf465b97e
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 8ed2597f-1a92-4b90--43b7953cffea
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up f5723232-3f04-4c22--bdc69d7bcff6
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up f75a6ee5-cd79-499c--db400f0bed93
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 30431fd9-306c--a5bd-cf6b9bc77ca1
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 6ed49e4d-d640--957e-94d2f4ba055f
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up -5c5e-475c-8b41-d58980da3f43
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 6168f2cd-de56--8fe5-c80e93f134cd
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 26e54a1c-601a-4f3b-afdc-a0c5b140affc
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up fa366bda-3ac8---b156acffb4aa
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up e9a16507--465c-af80-9d371a9018ad
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists c39c2030-4ad2-49b2-a2bd-d6f26d9cc2c8
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists 9fa68652-dda8-485a--92d109bc7283
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists f91dc889-379d-427a--9525deb70603
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 254c1dc1-c5aa-406d-a144-408c757f6b34
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists c13c44fd-397f-465d-bc14-917e8899e2fd
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up c5028149-28ec-4bd4-a5fe-3d13bdb82c6a
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 27c2a32e-eef3-41c9--15246fb20ac4
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 4f877615-df0d-40d0-a351-a21dc518c3f4
pg_upmap_items 1.1 [,]
pg_upmap_items 1.2 [,,,]
pg_upmap_items 1.3 [,]
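Since all of these maps can be dumped as JSON, the same information is easy to post-process. A small sketch, assuming jq is installed; the field names are the ones used by recent Ceph releases, so verify them against your own version's output:

# condensed up/in status for every OSD, from the OSD map
ceph osd dump -f json-pretty | jq '.osds[] | {osd: .osd, up: .up, in: .in, uuid: .uuid}'
# pool settings (replica size, min_size, pg_num) from the same map
ceph osd dump -f json-pretty | jq '.pools[] | {name: .pool_name, size: .size, min_size: .min_size, pg_num: .pg_num}'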
