rbd
Common commands
[root@imwl-03 ceph]# ceph osd pool create ceph-demo 64 64 # create the pool
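A typical follow-up flow (tag the pool, create and map an image); a sketch assuming the rdb-demo.img name and 2 GiB size seen in the later output:
ceph osd pool application enable ceph-demo rbd   # tag the new pool for RBD use
rbd create ceph-demo/rdb-demo.img --size 2G      # create the image
rbd info ceph-demo/rdb-demo.img                  # inspect it
rbd map ceph-demo/rdb-demo.img                   # map it, e.g. to /dev/rbd0
mkfs.ext4 /dev/rbd0                              # format and mount
mount /dev/rbd0 /media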
Removing features
Features have to be removed back to front (the last one enabled is disabled first).
[root@imwl-03 ceph]# rbd feature disable ceph-demo/rdb-demo.img deep-flatten
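To end up with only layering enabled (as the rbd info output later shows), the remaining features can be disabled in the same back-to-front order; a sketch:
rbd feature disable ceph-demo/rdb-demo.img fast-diff        # depends on object-map
rbd feature disable ceph-demo/rdb-demo.img object-map       # depends on exclusive-lock
rbd feature disable ceph-demo/rdb-demo.img exclusive-lock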
Trash
[imwl@imwl-03 ~]$ rbd create ceph-demo/ceph-trash.img --size 10G
[imwl@imwl-03 ~]$ rbd trash move ceph-demo/ceph-trash.img --expires-at 20230330 # move the RBD image to the trash with an expiry date of 20230330
rbd: image ceph-trash.img will expire at 2023-03-30T00:00:00.000000+0800
[imwl@imwl-03 ~]$ rbd -p ceph-demo ls
rdb-demo.img
[imwl@imwl-03 ~]$ rbd -p ceph-demo trash ls # list images in the RBD trash
53231e410ba1 ceph-trash.img
[imwl@imwl-03 ~]$ rbd trash restore -p ceph-demo 53231e410ba1 # restore
[imwl@imwl-03 ~]$ rbd -p ceph-demo ls
ceph-trash.img
rdb-demo.img
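If the image should be deleted for good instead of restored, the trash entry can be removed by id; a sketch using the id shown above:
rbd trash rm -p ceph-demo 53231e410ba1   # permanently delete one trashed image
rbd trash purge ceph-demo                # remove all expired images from the pool's trash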
Snapshots
Using the rdb-demo.img image from above, the block device mounted at /media.
[root@imwl-03 ~]$ rbd info ceph-demo/rdb-demo.img
rbd image 'rdb-demo.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023
[root@imwl-03 ~]$ rbd snap create ceph-demo/rdb-demo.img@snap_20230329 # create a snapshot
Creating snap: 100% complete...done.
[root@imwl-03 ~]$ rbd snap ls ceph-demo/rdb-demo.img # list snapshots
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap_20230329 2 GiB Wed Mar 29 15:35:55 2023
[root@imwl-03 ~]# rm -rf /media/1.txt # simulate losing a file
[root@imwl-03 ~]# ls /media/
lost+found
[root@imwl-03 ~]# rbd snap rollback ceph-demo/rdb-demo.img@snap_20230329 # roll back from the snapshot
Rolling back to snapshot: 100% complete...done.
[root@imwl-03 ~]# ls /media/ # not restored yet, the filesystem has to be remounted
lost+found
[root@imwl-03 ~]# umount /media
[root@imwl-03 ~]# mount /dev/rbd0 /media/ # mount fails, the device has to be re-mapped
mount: /media: can't read superblock on /dev/rbd0
[root@imwl-03 ~]# rbd unmap ceph-demo/rdb-demo.img # unmap
[root@imwl-03 ~]# rbd map ceph-demo/rdb-demo.img # map
/dev/rbd0
[root@imwl-03 ~]# mount /dev/rbd0 /media/
[root@imwl-03 ~]# ls
[root@imwl-03 ~]# cat /media/1.txt # the file is back
1
[root@imwl-03 ~]# rbd snap remove ceph-demo/rdb-demo.img@snap_20230329 # delete the snapshot
Removing snap: 100% complete...done.
[root@imwl-03 ~]# rbd snap purge ceph-demo/rdb-demo.img # or delete all snapshots at once
[root@imwl-03 ~]# rbd snap ls ceph-demo/rdb-demo.img
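For a crash-consistent snapshot of a mounted image, the filesystem can be frozen around the snap create; a sketch, assuming the /media mount above (the snapshot name is illustrative):
fsfreeze --freeze /media                                  # flush and block new writes
rbd snap create ceph-demo/rdb-demo.img@snap_consistent
fsfreeze --unfreeze /media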
Clone
Using the rdb-demo.img image from above, the block device mounted at /media.
[root@imwl-03 ~]$ rbd snap create ceph-demo/rdb-demo.img@template # create the template snapshot
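A minimal sketch of the clone step that the output below implies (protect the template, clone it, map the clone):
rbd snap protect ceph-demo/rdb-demo.img@template            # a clone parent must be protected
rbd clone ceph-demo/rdb-demo.img@template ceph-demo/vm1-clone.img
rbd map ceph-demo/vm1-clone.img                             # appears as /dev/rbd1 in the device listing below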
Break the dependency so the clone becomes an independent RBD image
[root@imwl-03 ~]# rbd children ceph-demo/rdb-demo.img@template
ceph-demo/vm1-clone.img
[root@imwl-03 ~]# rbd flatten ceph-demo/vm1-clone.img
Image flatten: 100% complete...done.
[root@imwl-03 ~]# rbd info ceph-demo/vm1-clone.img # the parent information is gone
rbd image 'vm1-clone.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 5824261366b9
block_name_prefix: rbd_data.5824261366b9
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 15:56:33 2023
access_timestamp: Wed Mar 29 15:56:33 2023
modify_timestamp: Wed Mar 29 15:56:33 2023
Deleting the template snapshot no longer affects the flattened clone
[root@imwl-03 ~]# rbd snap unprotect ceph-demo/rdb-demo.img@template
[root@imwl-03 ~]# rbd snap rm ceph-demo/rdb-demo.img@template
Removing snap: 100% complete...done.
[root@imwl-03 ~]# rbd device ls
id pool namespace image snap device
0 ceph-demo rdb-demo.img - /dev/rbd0
1 ceph-demo vm1-clone.img - /dev/rbd1
Snapshot export
[root@imwl-03 ~]# rbd snap create ceph-demo/vm1-clone.img@template
Creating snap: 100% complete...done.
[root@imwl-03 ~]# rbd export ceph-demo/vm1-clone.img@template vm1-clone.img
Exporting image: 100% complete...done.
[root@imwl-03 ~]# ls -hl vm1-clone.img
-rw-r--r--. 1 root root 2.0G Mar 29 16:21 vm1-clone.img
Import and restore
[root@imwl-03 ~]# rm -rf /mnt/vm1/*
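A sketch of the restore flow, assuming the vm1-clone-new.img name and /mnt/vm1 mount point used in the next section (the device name is illustrative):
rbd import vm1-clone.img ceph-demo/vm1-clone-new.img   # import the exported file as a new image
rbd map ceph-demo/vm1-clone-new.img
mount /dev/rbd2 /mnt/vm1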
Incremental backup and restore
[root@imwl-03 ~]# rbd snap create ceph-demo/vm1-clone-new.img@v1
Creating snap: 100% complete...done.
[root@imwl-03 ~]# echo 2 > /mnt/vm1/2.txt
[root@imwl-03 ~]# cat /mnt/vm1/2.txt
2
[root@imwl-03 ~]# rbd snap create ceph-demo/vm1-clone-new.img@v2
Creating snap: 100% complete...done.
[root@imwl-03 ~]# rbd snap ls ceph-demo/vm1-clone-new.img
SNAPID NAME SIZE PROTECTED TIMESTAMP
11 v1 2 GiB Wed Mar 29 16:28:45 2023
12 v2 2 GiB Wed Mar 29 16:29:43 2023
# export
[root@imwl-03 ~]# rbd export ceph-demo/vm1-clone-new.img@v1 vm1-clone-new-2.img # full export
Exporting image: 100% complete...done.
[root@imwl-03 ~]# rbd export-diff ceph-demo/vm1-clone-new.img@v2 vm1-clone-new-2.img@v2 # incremental export
Exporting image: 100% complete...done.
# import
[root@imwl-03 ~]# rbd import vm1-clone-new-2.img ceph-demo/vm1-clone-new2.img
Importing image: 100% complete...done.
[root@imwl-03 ~]# rbd import-diff vm1-clone-new-2.img@v2 ceph-demo/vm1-clone-new2.img
Importing image diff: 100% complete...done.
# map, mount and verify
[root@imwl-03 ~]# rbd device map ceph-demo/vm1-clone-new2.img
/dev/rbd3
[root@imwl-03 ~]# mkdir /mnt/vm1new
[root@imwl-03 ~]# mount /dev/rbd3 /mnt/vm1new
[root@imwl-03 ~]# ls -l /mnt/vm1new
total 24
-rw-r--r--. 1 root root 2 Mar 29 13:56 1.txt
-rw-r--r--. 1 root root 2 Mar 29 16:29 2.txt
drwx------. 2 root root 16384 Mar 29 12:47 lost+found
[root@imwl-03 ~]# cat /mnt/vm1new/2.txt
2
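The export-diff above covers everything from image creation up to v2; to capture only the delta between v1 and v2, --from-snap can be used. A sketch (the diff file name is illustrative):
rbd export-diff --from-snap v1 ceph-demo/vm1-clone-new.img@v2 v1-to-v2.diff   # only the v1 -> v2 changes
rbd import-diff v1-to-v2.diff ceph-demo/vm1-clone-new2.img                    # apply on a copy that already has v1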
fs
A CephFS filesystem has to exist first; rook-ceph has already created myfs.
[root@imwl-03 ~]# ceph fs ls
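The secret used in the mounts below can be read from the admin keyring; a sketch:
ceph auth get-key client.admin   # prints the key passed as secret= / used by ceph-fuse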
Kernel mount
[root@imwl-03 ~]# mount -t ceph 192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789:/ /mnt/ceph-kernel -o name=admin,secret=AQBsvyNk2aDQORAAOA2MdKFKUP1Y8sRQBxTJ3A==
[root@imwl-03 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789:/ 190G 0 190G 0% /mnt/ceph-kernel
Userspace mount; the ceph-fuse package has to be installed
[root@imwl-03 ~]# ceph-fuse -o rw -n client.admin -m 192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789 /mnt/ceph-user
Verify that the two mount points show the same content
[root@imwl-03 ~]# echo hello > /mnt/ceph-kernel/hello.txt
[root@imwl-03 ~]# cat /mnt/ceph-user/hello.txt
hello
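To make the kernel mount persistent, an /etc/fstab entry can be used; a sketch, assuming the key is stored in /etc/ceph/admin.secret:
# /etc/fstab
192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789:/  /mnt/ceph-kernel  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0 0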
Snapshots
Not recommended
# ceph fs set myfs allow_new_snaps 1
enabled new snapshots
# mkdir .snap/snap1 # create a snapshot; all current files show up under this directory
# ls -l .snap/snap1
total 1
-rw-r--r--. 1 root root 2 Mar 29 22:24 3.txt
# rm -rf 3.txt
# ls
# cp -ra .snap/snap1/* ./ # restore from the snapshot
# ls -l
total 1
-rw-r--r--. 1 root root 2 Mar 29 22:24 3.txt
# rmdir .snap/snap1 # delete the snapshot
Object storage
See the rook-ceph notes; the procedure is much the same.
Handling alerts
Alerts that have already been dealt with can be archived.
[root@imwl-03 ~]# ceph -s
cluster:
id: ab47cbe5-a51e-4f93-8c53-28e426ee9543
health: HEALTH_WARN
1 mgr modules have recently crashed
services:
mon: 3 daemons, quorum a,b,c (age 4h)
mgr: a(active, since 4h), standbys: b
mds: 1/1 daemons up, 1 hot standby
osd: 6 osds: 6 up (since 4h), 6 in (since 4h)
data:
volumes: 1/1 healthy
pools: 4 pools, 113 pgs
objects: 658 objects, 2.2 GiB
usage: 4.0 GiB used, 596 GiB / 600 GiB avail
pgs: 113 active+clean
io:
client: 1.2 KiB/s rd, 2 op/s rd, 0 op/s wr
[root@imwl-03 ~]# ceph crash ls-new
ID ENTITY NEW
2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38 mgr.b *
[root@imwl-03 ~]# ceph crash archive-all
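A single crash can also be inspected and archived by id instead of archiving everything; a sketch using the id listed above:
ceph crash info 2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38      # show details
ceph crash archive 2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38   # archive just this one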
Removing an OSD
ceph osd tree
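A sketch of the usual removal sequence, assuming osd.5 (hypothetical) as the target; on this rook-ceph cluster the OSD daemon is stopped by scaling down its deployment rather than with systemctl:
ceph osd out osd.5            # mark it out and wait for rebalancing to finish (watch ceph -s)
ceph osd crush remove osd.5   # remove it from the CRUSH map
ceph auth del osd.5           # delete its key
ceph osd rm osd.5             # remove the OSD entry
# recent releases can do the last three steps in one go:
# ceph osd purge osd.5 --yes-i-really-mean-it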