rook-ceph: using Ceph directly (raw usage)

rbd

Common commands

[root@imwl-03 ceph]# ceph osd pool create ceph-demo 64 64  # create a pool
pool 'ceph-demo' created

[root@imwl-03 ceph]# rbd create ceph-demo/rdb-demo.img --size 1G # create an RBD image in the pool
[root@imwl-03 ceph]# rbd create -p ceph-demo --image rdb-demo.img --size 1G

[root@imwl-03 ceph]# rbd -p ceph-demo ls # list RBD images
rdb-demo.img

[root@imwl-03 ceph]# rbd remove ceph-demo/rdb-demo1.img # remove an RBD image
Removing image: 100% complete...done.

[root@imwl-03 ceph]# rbd info ceph-demo/rdb-demo.img # show image details
rbd image 'rdb-demo.img':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten # the kernel RBD client does not support many of these features; usually only layering is kept
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023

[root@imwl-03 ceph]# rbd map ceph-demo/rdb-demo.img # map the image as a block device
/dev/rbd0

[root@imwl-03 ceph]# rbd device list # list mapped block devices
id pool namespace image snap device
0 ceph-demo rdb-demo.img - /dev/rbd0

[root@imwl-03 ceph]# mkfs.ext4 /dev/rbd0 # format the block device
mke2fs 1.46.5 (30-Dec-2021)
Discarding device blocks: done
Creating filesystem with 262144 4k blocks and 65536 inodes
Filesystem UUID: 4affc63b-caad-44dc-a148-04af7a68bdcd
Superblock backups stored on blocks:
32768, 98304, 163840, 229376

Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

[root@imwl-03 ceph]# mount /dev/rbd0 /media/ # mount the block device
[root@imwl-03 ceph]# ls /media/
lost+found
[root@imwl-03 ceph]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 4.0M 0 4.0M 0% /dev
tmpfs 7.7G 0 7.7G 0% /dev/shm
tmpfs 3.1G 15M 3.1G 1% /run
/dev/mapper/cs_192-root 62G 4.7G 57G 8% /
/dev/sdb1 1014M 291M 724M 29% /boot
/dev/mapper/cs_192-home 30G 251M 30G 1% /home
tmpfs 1.6G 108K 1.6G 1% /run/user/1000
/dev/sr1 8.9G 8.9G 0 100% /run/media/imwl/CentOS-Stream-9-BaseOS-x86_64
tmpfs 1.6G 36K 1.6G 1% /run/user/0
/dev/rbd0 974M 24K 907M 1% /media
[root@imwl-03 ceph]# echo 1 > /media/1.txt
[root@imwl-03 ceph]# cat /media/1.txt
1

[root@imwl-03 ceph]# rbd resize ceph-demo/rdb-demo.img --size 2G # grow the RBD image (only growing by default; shrinking needs --allow-shrink)
Resizing image: 100% complete...done.

[root@imwl-03 ceph]# rbd info ceph-demo/rdb-demo.img # check: the image has been grown
rbd image 'rdb-demo.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023

[root@imwl-03 ceph]# df -h /media # the filesystem has not grown yet
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 974M 28K 907M 1% /media

[root@imwl-03 ceph]# resize2fs /dev/rbd0 # grow the filesystem
resize2fs 1.46.5 (30-Dec-2021)
Filesystem at /dev/rbd0 is mounted on /media; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 1
The filesystem on /dev/rbd0 is now 524288 (4k) blocks long.

[root@imwl-03 ceph]# df -h /media # the filesystem has grown
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 2.0G 28K 1.9G 1% /media

[root@imwl-03 ceph]# cat /media/1.txt # the file is intact
1

[root@imwl-03 ceph]# rados -p ceph-demo ls |grep rbd_data.13cb8cac0db3 # objects backing the RBD image
rbd_data.13cb8cac0db3.0000000000000120
rbd_data.13cb8cac0db3.0000000000000080
rbd_data.13cb8cac0db3.0000000000000100
rbd_data.13cb8cac0db3.0000000000000060
rbd_data.13cb8cac0db3.0000000000000020
rbd_data.13cb8cac0db3.00000000000000e0
rbd_data.13cb8cac0db3.0000000000000004
rbd_data.13cb8cac0db3.00000000000000a0
rbd_data.13cb8cac0db3.0000000000000000

[root@imwl-03 ceph]# rados -p ceph-demo stat rbd_data.13cb8cac0db3.0000000000000120 # object details
ceph-demo/rbd_data.13cb8cac0db3.0000000000000120 mtime 2023-03-29T13:57:36.000000+0800, size 8192

[root@imwl-03 ceph]# ceph osd map ceph-demo rbd_data.13cb8cac0db3.0000000000000120 # the PG and OSDs that hold this object
osdmap e69 pool 'ceph-demo' (4) object 'rbd_data.13cb8cac0db3.0000000000000120' -> pg 4.26c148c4 (4.4) -> up ([5,1,3], p5) acting ([5,1,3], p5)

[root@imwl-03 ceph]# ceph osd tree # OSD layout
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.58612 root default
-5 0.19537 host 192-168-2-131
0 hdd 0.09769 osd.0 up 1.00000 1.00000
3 hdd 0.09769 osd.3 up 1.00000 1.00000
-7 0.19537 host 192-168-2-132
1 hdd 0.09769 osd.1 up 1.00000 1.00000
4 hdd 0.09769 osd.4 up 1.00000 1.00000
-3 0.19537 host 192-168-2-133
2 hdd 0.09769 osd.2 up 1.00000 1.00000
5 hdd 0.09769 osd.5 up 1.00000 1.00000

[root@imwl-03 ceph]# ceph -s # cluster status
cluster:
id: ab47cbe5-a51e-4f93-8c53-28e426ee9543
health: HEALTH_WARN
1 pool(s) do not have an application enabled

services:
mon: 3 daemons, quorum a,b,c (age 88m)
mgr: a(active, since 87m), standbys: b
mds: 1/1 daemons up, 1 hot standby
osd: 6 osds: 6 up (since 84m), 6 in (since 85m)

data:
volumes: 1/1 healthy
pools: 4 pools, 113 pgs
objects: 39 objects, 8.4 MiB
usage: 140 MiB used, 600 GiB / 600 GiB avail
pgs: 113 active+clean

io:
client: 1.2 KiB/s rd, 2 op/s rd, 0 op/s wr

[root@imwl-03 ceph]# ceph health detail # detailed health information
HEALTH_WARN 1 pool(s) do not have an application enabled; 1 mgr modules have recently crashed
[WRN] POOL_APP_NOT_ENABLED: 1 pool(s) do not have an application enabled
application not enabled on pool 'ceph-demo'
use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.

[root@imwl-03 ceph]# ceph osd pool application get ceph-demo # check the pool's application tag
{}

[root@imwl-03 ceph]# ceph osd pool application enable ceph-demo rbd # tag the pool with its application type for easier management
enabled application 'rbd' on pool 'ceph-demo'

[root@imwl-03 ceph]# ceph osd pool application get ceph-demo
{
"rbd": {}
}

[root@imwl-03 ceph]# ceph -s # the cluster is healthy again
cluster:
id: ab47cbe5-a51e-4f93-8c53-28e426ee9543
health: HEALTH_OK

services:
mon: 3 daemons, quorum a,b,c (age 89m)
mgr: a(active, since 88m), standbys: b
mds: 1/1 daemons up, 1 hot standby
osd: 6 osds: 6 up (since 86m), 6 in (since 86m)

data:
volumes: 1/1 healthy
pools: 4 pools, 113 pgs
objects: 39 objects, 8.4 MiB
usage: 141 MiB used, 600 GiB / 600 GiB avail
pgs: 113 active+clean

io:
client: 853 B/s rd, 1 op/s rd, 0 op/s wr
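
The two 64s in the ceph osd pool create command above are pg_num and pgp_num (the placement group count for the pool). As a rough sketch, assuming the pool name from above, the PG count can be inspected or changed later, or handed to the autoscaler; on recent releases pgp_num is kept in sync automatically, on older ones it has to be set as well:

ceph osd pool get ceph-demo pg_num                   # show the current PG count
ceph osd pool set ceph-demo pg_num 128               # raise it if needed
ceph osd pool set ceph-demo pg_autoscale_mode on     # or let the PG autoscaler manage it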

Removing features

Features must be disabled from back to front, because later features depend on earlier ones (e.g. fast-diff requires object-map, and object-map requires exclusive-lock).

[root@imwl-03 ceph]# rbd feature disable ceph-demo/rdb-demo.img deep-flatten
[root@imwl-03 ceph]# rbd feature disable ceph-demo/rdb-demo.img fast-diff
[root@imwl-03 ceph]# rbd feature disable ceph-demo/rdb-demo.img object-map
rbd: failed to update image features: (22) Invalid argument
2023-03-29T14:41:39.003+0800 7f613642da00 -1 librbd::Operations: one or more requested features are already disabled
[root@imwl-03 ceph]# rbd info ceph-demo/rdb-demo.img
rbd image 'rdb-demo.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering, exclusive-lock
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023
[root@imwl-03 ceph]# rbd feature disable ceph-demo/rdb-demo.img exclusive-lock
[root@imwl-03 ceph]# rbd info ceph-demo/rdb-demo.img
rbd image 'rdb-demo.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023
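
Instead of disabling features after the fact, an image can be created with only the layering feature from the start. A minimal sketch, using a hypothetical image name kernel-demo.img:

rbd create ceph-demo/kernel-demo.img --size 1G --image-feature layering
rbd info ceph-demo/kernel-demo.img     # should report: features: layering
# a cluster-wide default can also be set in ceph.conf under [client]:
#   rbd default features = 1           # 1 = layering only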

Trash

[imwl@imwl-03 ~]$ rbd create ceph-demo/ceph-trash.img --size 10G

[imwl@imwl-03 ~]$ rbd trash move ceph-demo/ceph-trash.img --expires-at 20230330 # move the image to the trash and set its expiry date to 20230330
rbd: image ceph-trash.img will expire at 2023-03-30T00:00:00.000000+0800

[imwl@imwl-03 ~]$ rbd -p ceph-demo ls
rdb-demo.img

[imwl@imwl-03 ~]$ rbd -p ceph-demo trash ls # list images in the trash
53231e410ba1 ceph-trash.img

[imwl@imwl-03 ~]$ rbd trash restore -p ceph-demo 53231e410ba1 # restore the image from the trash
[imwl@imwl-03 ~]$ rbd -p ceph-demo ls
ceph-trash.img
rdb-demo.img
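
To delete an image from the trash permanently instead of restoring it, rbd trash rm can be used with the image id shown by rbd trash ls. A sketch, reusing the id from above purely for illustration; removing an image before its expiry time may need --force:

rbd trash ls -p ceph-demo                # note the image id
rbd trash rm -p ceph-demo 53231e410ba1   # delete permanently (add --force if the expiry time has not passed)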

Snapshots

Using the rdb-demo.img image from above, which is mapped and mounted at /media.

[root@imwl-03 ~]$ rbd info ceph-demo/rdb-demo.img
rbd image 'rdb-demo.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 13cb8cac0db3
block_name_prefix: rbd_data.13cb8cac0db3
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 12:43:30 2023
access_timestamp: Wed Mar 29 12:43:30 2023
modify_timestamp: Wed Mar 29 12:43:30 2023
[root@imwl-03 ~]$ rbd snap create ceph-demo/rdb-demo.img@snap_20230329 # create a snapshot
Creating snap: 100% complete...done.

[root@imwl-03 ~]$ rbd snap ls ceph-demo/rdb-demo.img # list snapshots
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap_20230329 2 GiB Wed Mar 29 15:35:55 2023

[root@imwl-03 ~]# rm -rf /media/1.txt # simulate an accidental deletion
[root@imwl-03 ~]# ls /media/
lost+found
[root@imwl-03 ~]# rbd snap rollback ceph-demo/rdb-demo.img@snap_20230329 # roll back to the snapshot
Rolling back to snapshot: 100% complete...done.
[root@imwl-03 ~]# ls /media/ # not restored yet; the filesystem has to be remounted
lost+found
[root@imwl-03 ~]# umount /media
[root@imwl-03 ~]# mount /dev/rbd0 /media/ # mount fails; the device has to be re-mapped first
mount: /media: can't read superblock on /dev/rbd0

[root@imwl-03 ~]# rbd unmap ceph-demo/rdb-demo.img # unmap
[root@imwl-03 ~]# rbd map ceph-demo/rdb-demo.img # map
/dev/rbd0
[root@imwl-03 ~]# mount /dev/rbd0 /media/
[root@imwl-03 ~]# ls
[root@imwl-03 ~]# cat /media/1.txt # the file is back
1

[root@imwl-03 ~]# rbd snap remove ceph-demo/rdb-demo.img@snap_20230329 # delete the snapshot
Removing snap: 100% complete...done.
[root@imwl-03 ~]# rbd snap purge ceph-demo/rdb-demo.img # or delete all snapshots of the image at once
[root@imwl-03 ~]# rbd snap ls ceph-demo/rdb-demo.img
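
For a crash-consistent snapshot of a mounted filesystem, it can help to freeze I/O while the snapshot is taken. A sketch, assuming the ext4 filesystem is still mounted at /media (the snapshot name is just an example):

fsfreeze -f /media                                    # suspend writes to the filesystem
rbd snap create ceph-demo/rdb-demo.img@snap_frozen    # take the snapshot while frozen
fsfreeze -u /media                                    # resume writes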

Clones

Using the rdb-demo.img image from above, which is mapped and mounted at /media.

[root@imwl-03 ~]$ rbd snap create ceph-demo/rdb-demo.img@template # create a snapshot
Creating snap: 100% complete...done.

[root@imwl-03 ~]# rbd clone ceph-demo/rdb-demo.img@template ceph-demo/vm1-clone.img # the snapshot must be protected before it can be cloned
2023-03-29T15:55:55.341+0800 7f3c9964b640 -1 librbd::image::CloneRequest: 0x5650e49dbf90 validate_parent: parent snapshot must be protected
rbd: clone error: (22) Invalid argument

[root@imwl-03 ~]# rbd snap protect ceph-demo/rdb-demo.img@template # protect the snapshot so it cannot be deleted
[root@imwl-03 ~]# rbd snap rm ceph-demo/rdb-demo.img@template
Removing snap: 0% complete...failed.
2023-03-29T15:54:33.329+0800 7f749ab24a00 -1 librbd::Operations: snapshot is protected


[root@imwl-03 ~]# rbd clone ceph-demo/rdb-demo.img@template ceph-demo/vm1-clone.img # clone
[root@imwl-03 ~]# rbd -p ceph-demo ls
ceph-trash.img
rdb-demo.img
vm1-clone.img

[root@imwl-03 ~]# rbd -p ceph-demo info vm1-clone.img
rbd image 'vm1-clone.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 5824261366b9
block_name_prefix: rbd_data.5824261366b9
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 15:56:33 2023
access_timestamp: Wed Mar 29 15:56:33 2023
modify_timestamp: Wed Mar 29 15:56:33 2023
parent: ceph-demo/rdb-demo.img@template # the clone still depends on this parent snapshot
overlap: 2 GiB
# the data from the parent is visible in the clone
[root@imwl-03 ~]# rbd device map ceph-demo/vm1-clone.img
/dev/rbd1
[root@imwl-03 ~]# mkdir /mnt/vm1
[root@imwl-03 ~]# mount /dev/rbd1 /mnt/vm1
[root@imwl-03 ~]# cat /mnt/vm1/1.txt
1

Detach from the parent to make the clone a standalone RBD image

[root@imwl-03 ~]# rbd children ceph-demo/rdb-demo.img@template
ceph-demo/vm1-clone.img

[root@imwl-03 ~]# rbd flatten ceph-demo/vm1-clone.img
Image flatten: 100% complete...done.

[root@imwl-03 ~]# rbd info ceph-demo/vm1-clone.img # the parent entry is gone
rbd image 'vm1-clone.img':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 5824261366b9
block_name_prefix: rbd_data.5824261366b9
format: 2
features: layering
op_features:
flags:
create_timestamp: Wed Mar 29 15:56:33 2023
access_timestamp: Wed Mar 29 15:56:33 2023
modify_timestamp: Wed Mar 29 15:56:33 2023

Deleting the snapshot no longer affects the clone

[root@imwl-03 ~]# rbd snap unprotect ceph-demo/rdb-demo.img@template
[root@imwl-03 ~]# rbd snap rm ceph-demo/rdb-demo.img@template
Removing snap: 100% complete...done.
[root@imwl-03 ~]# rbd device ls
id pool namespace image snap device
0 ceph-demo rdb-demo.img - /dev/rbd0
1 ceph-demo vm1-clone.img - /dev/rbd1

Exporting a snapshot

[root@imwl-03 ~]# rbd snap create  ceph-demo/vm1-clone.img@template
Creating snap: 100% complete...done.
[root@imwl-03 ~]# rbd export ceph-demo/vm1-clone.img@template vm1-clone.img
Exporting image: 100% complete...done.
[root@imwl-03 ~]# ls -hl vm1-clone.img
-rw-r--r--. 1 root root 2.0G Mar 29 16:21 vm1-clone.img
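
rbd export can also write to stdout when - is given as the destination, which allows compressing the backup on the fly; the reverse works for import. A sketch, with hypothetical file and image names:

rbd export ceph-demo/vm1-clone.img@template - | gzip -c > vm1-clone.img.gz        # compressed export
gunzip -c vm1-clone.img.gz | rbd import - ceph-demo/vm1-clone-restore.img         # import back from stdin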

Importing to restore

[root@imwl-03 ~]# rm -rf /mnt/vm1/*
[root@imwl-03 ~]# umount /mnt/vm1

[root@imwl-03 ~]# ls -l
-rw-r--r--. 1 root root 2147483648 Mar 29 16:21 vm1-clone.img

[root@imwl-03 ~]# rbd import vm1-clone.img ceph-demo/vm1-clone-new.img # import
Importing image: 100% complete...done.

[root@imwl-03 ~]# rbd -p ceph-demo ls
ceph-trash.img
rdb-demo.img
vm1-clone-new.img
vm1-clone.img


[root@imwl-03 ~]# rbd device map ceph-demo/vm1-clone-new.img
/dev/rbd2

[root@imwl-03 ~]# mount /dev/rbd2 /mnt/vm1/ # the data is back
[root@imwl-03 ~]# ls -l /mnt/vm1/
total 20
-rw-r--r--. 1 root root 2 Mar 29 13:56 1.txt
drwx------. 2 root root 16384 Mar 29 12:47 lost+found
[root@imwl-03 ~]# cat /mnt/vm1/1.txt
1

Incremental backup and restore

[root@imwl-03 ~]# rbd snap create ceph-demo/vm1-clone-new.img@v1
Creating snap: 100% complete...done.
[root@imwl-03 ~]# echo 2 > /mnt/vm1/2.txt
[root@imwl-03 ~]# cat /mnt/vm1/2.txt
2
[root@imwl-03 ~]# rbd snap create ceph-demo/vm1-clone-new.img@v2
Creating snap: 100% complete...done.

[root@imwl-03 ~]# rbd snap ls ceph-demo/vm1-clone-new.img
SNAPID NAME SIZE PROTECTED TIMESTAMP
11 v1 2 GiB Wed Mar 29 16:28:45 2023
12 v2 2 GiB Wed Mar 29 16:29:43 2023

# export
[root@imwl-03 ~]# rbd export ceph-demo/vm1-clone-new.img@v1 vm1-clone-new-2.img # full export
Exporting image: 100% complete...done.
[root@imwl-03 ~]# rbd export-diff ceph-demo/vm1-clone-new.img@v2 vm1-clone-new-2.img@v2 # diff export
Exporting image: 100% complete...done.

# import
[root@imwl-03 ~]# rbd import vm1-clone-new-2.img ceph-demo/vm1-clone-new2.img
Importing image: 100% complete...done.
[root@imwl-03 ~]# rbd import-diff vm1-clone-new-2.img@v2 ceph-demo/vm1-clone-new2.img
Importing image diff: 100% complete...done.

# map and mount to verify
[root@imwl-03 ~]# rbd device map ceph-demo/vm1-clone-new2.img
/dev/rbd3
[root@imwl-03 ~]# mkdir /mnt/vm1new
[root@imwl-03 ~]# mount /dev/rbd3 /mnt/vm1new
[root@imwl-03 ~]# ls -l /mnt/vm1new
total 24
-rw-r--r--. 1 root root 2 Mar 29 13:56 1.txt
-rw-r--r--. 1 root root 2 Mar 29 16:29 2.txt
drwx------. 2 root root 16384 Mar 29 12:47 lost+found
[root@imwl-03 ~]# cat /mnt/vm1new/2.txt
2
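
Note that rbd export-diff without --from-snap writes everything up to the given snapshot, not only the delta between v1 and v2. For a true incremental chain, each diff should start from the previous snapshot; a sketch of that chain (the diff file names and the target image vm1-clone-new3.img are examples, and the target must already contain the starting snapshot, which the previous import-diff creates):

rbd create ceph-demo/vm1-clone-new3.img --size 2G                          # target image for the restore
rbd export-diff ceph-demo/vm1-clone-new.img@v1 v1.diff                     # base diff: everything up to v1
rbd export-diff --from-snap v1 ceph-demo/vm1-clone-new.img@v2 v1-v2.diff   # only the changes between v1 and v2
rbd import-diff v1.diff ceph-demo/vm1-clone-new3.img                       # applies the base and creates snapshot v1 on the target
rbd import-diff v1-v2.diff ceph-demo/vm1-clone-new3.img                    # applies the v1->v2 delta and creates snapshot v2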

fs

A CephFS filesystem must exist first; rook-ceph has already created one named myfs.

[root@imwl-03 ~]# ceph fs ls
name: myfs, metadata pool: myfs-metadata, data pools: [myfs-data0 ]
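
If no filesystem exists yet (outside of rook-ceph, or when a second one is needed), one can be created from a metadata pool and a data pool; an MDS must be running to serve it, which under rook-ceph normally comes from a CephFilesystem custom resource rather than manual commands. A sketch with example pool and filesystem names:

ceph osd pool create cephfs-metadata 32
ceph osd pool create cephfs-data 32
ceph fs new myfs2 cephfs-metadata cephfs-data   # fs name and pool names are examples
ceph fs ls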

Kernel mount

[root@imwl-03 ~]# mount -t ceph 192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789:/ /mnt/ceph-kernel -o name=admin,secret=AQBsvyNk2aDQORAAOA2MdKFKUP1Y8sRQBxTJ3A==
[root@imwl-03 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789:/ 190G 0 190G 0% /mnt/ceph-kernel
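
Passing the key on the command line leaves it in the shell history; the kernel client also accepts a secretfile option, and the mount can be made persistent through /etc/fstab. A sketch, assuming the base64 key has been saved to /etc/ceph/admin.secret (the file path is an example):

mount -t ceph 192.168.2.131:6789,192.168.2.132:6789,192.168.2.133:6789:/ /mnt/ceph-kernel -o name=admin,secretfile=/etc/ceph/admin.secret
# /etc/fstab entry for mounting at boot:
# 192.168.2.131:6789,192.168.2.132:6789,192.168.2.133:6789:/  /mnt/ceph-kernel  ceph  name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0  0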

Userspace mount (requires ceph-fuse to be installed)

[root@imwl-03 ~]# ceph-fuse -o rw   -n client.admin -m  192.168.2.132:6789,192.168.2.131:6789,192.168.2.133:6789 /mnt/ceph-user
2023-03-29T14:17:34.968+0800 7f01aa0b0180 -1 init, newargv = 0x55fe75940d00 newargc=17
ceph-fuse[1102634]: starting ceph client
ceph-fuse[1102634]: starting fuse
[root@imwl-03 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
ceph-fuse 190G 0 190G 0% /mnt/ceph-user

Verify that both mount points show the same content

[root@imwl-03 ~]# echo hello > /mnt/ceph-kernel/hello.txt
[root@imwl-03 ~]# cat /mnt/ceph-user/hello.txt
hello

Snapshots

Not recommended

# ceph fs set myfs  allow_new_snaps 1
enabled new snapshots

# mkdir .snap/snap1 # create a snapshot; .snap/snap1 exposes a read-only view of the directory as it was at snapshot time (nothing is actually copied)

# ls -l .snap/snap1
total 1
-rw-r--r--. 1 root root 2 Mar 29 22:24 3.txt
# rm -rf 3.txt
# ls
# cp -ra .snap/snap1/* ./ # restore from the snapshot
# ls -l
total 1
-rw-r--r--. 1 root root 2 Mar 29 22:24 3.txt
# rmdir .snap/snap1 # delete the snapshot

Object storage

Refer to the procedure in the rook-ceph section; it is much the same.
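
As a rough sketch of the non-rook path, once an RGW daemon is running, a user can be created with radosgw-admin and the printed access/secret keys used with any S3-compatible client (the uid and display name below are examples):

radosgw-admin user create --uid=demo-user --display-name="Demo User"   # prints access_key and secret_key
radosgw-admin user info --uid=demo-user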

Handling health warnings

Warnings that have already been dealt with can be archived.

[root@imwl-03 ~]# ceph -s
cluster:
id: ab47cbe5-a51e-4f93-8c53-28e426ee9543
health: HEALTH_WARN
1 mgr modules have recently crashed

services:
mon: 3 daemons, quorum a,b,c (age 4h)
mgr: a(active, since 4h), standbys: b
mds: 1/1 daemons up, 1 hot standby
osd: 6 osds: 6 up (since 4h), 6 in (since 4h)

data:
volumes: 1/1 healthy
pools: 4 pools, 113 pgs
objects: 658 objects, 2.2 GiB
usage: 4.0 GiB used, 596 GiB / 600 GiB avail
pgs: 113 active+clean

io:
client: 1.2 KiB/s rd, 2 op/s rd, 0 op/s wr

[root@imwl-03 ~]# ceph crash ls-new
ID ENTITY NEW
2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38 mgr.b *
[root@imwl-03 ~]# ceph crash archive-all
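
Individual crashes can also be inspected and archived one at a time; the id below is the one reported by ceph crash ls-new above:

ceph crash info 2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38      # show the crash metadata and backtrace
ceph crash archive 2023-03-29T04:33:49.298550Z_31fd79c2-e230-4ee1-b340-e9a7920a6e38   # archive just this crash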

Removing an OSD

ceph osd tree                          # locate the OSD to remove
ceph osd crush reweight osd.2 0.0      # set its CRUSH weight to 0 so data migrates off osd.2
kubectl delete deploy -n rook-ceph rook-ceph-osd-2   # stop the OSD pod managed by rook
ceph osd out 2                         # mark the OSD out
ceph osd crush remove osd.2            # remove it from the CRUSH map

ceph auth del osd.2                    # delete its auth key
ceph osd rm 2                          # remove the OSD from the cluster

ceph -s                                # once recovery/backfill finishes, the disk can be replaced

# wipe the old disk
DISK="/dev/vdc"
sgdisk --zap-all $DISK
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
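
On recent Ceph releases the crush remove / auth del / osd rm steps can be collapsed into a single purge, and safe-to-destroy can confirm that removing the OSD would not lose data. A sketch of that variant:

while ! ceph osd safe-to-destroy osd.2; do sleep 10; done   # wait until the OSD holds no data that is still needed
ceph osd purge 2 --yes-i-really-mean-it                     # removes it from the CRUSH map, auth, and OSD map in one step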