Using rook-ceph-tools

rook-ceph-tools

Using the ceph command directly on the operating system requires installing the Ceph packages separately, and on some operating systems even building them from source. Instead, you can use rook-ceph-tools to operate Ceph from inside the cluster.

Default manifest: https://github.com/rook/rook/blob/master/deploy/examples/toolbox.yaml

It usually needs a few changes:


kind: DaemonSet # change to DaemonSet so a pod runs on every node; or keep Deployment and schedule it onto the node you want to operate
...
          image: imwl/ceph:v17.2.6 # custom image
...
          securityContext: # root privileges are required to map devices
            privileged: true
            runAsUser: 0
...
          volumeMounts: # mounts needed by rbd, mount, and similar commands
            - mountPath: /dev
              name: dev
            - mountPath: /sys/bus
              name: sysbus
            - mountPath: /lib/modules
              name: libmodules

      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
      hostNetwork: true
      volumes:
        - name: dev
          hostPath:
            path: /dev
        - name: sysbus
          hostPath:
            path: /sys/bus
        - name: libmodules
          hostPath:
            path: /lib/modules
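
Because the toolbox now runs as a DaemonSet, there is one pod per node, and device operations such as rbd map take effect on whichever node the pod is scheduled on. A minimal sketch for reaching the pod on a specific node (the node name here is hypothetical):

# pick the toolbox pod running on the node you want to operate on
NODE=test-62   # hypothetical node name
POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools \
  --field-selector spec.nodeName=${NODE} -o name | head -n 1)
kubectl -n rook-ceph exec -it ${POD} -- bash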

Full manifest after the changes:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-tools
spec:
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
      hostNetwork: true
      containers:
        - name: rook-ceph-tools
          image: imwl/ceph:v17.2.6
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")

                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi

                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF

              # write the initial config file
              write_endpoints

              # continuously update the mon endpoints if they fail over
              watch_endpoints
          imagePullPolicy: IfNotPresent
          tty: true
          securityContext:
            privileged: true
            readOnlyRootFilesystem: false
            runAsUser: 0
            runAsGroup: 0
          env:
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          volumeMounts:
            - mountPath: /etc/ceph
              name: ceph-config
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
              readOnly: true
            - mountPath: /dev
              name: dev
            - mountPath: /sys/bus
              name: sysbus
            - mountPath: /lib/modules
              name: libmodules
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
        - name: dev
          hostPath:
            path: /dev
        - name: sysbus
          hostPath:
            path: /sys/bus
        - name: libmodules
          hostPath:
            path: /lib/modules
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
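
Assuming the manifest above is saved as toolbox.yaml, deploying and verifying it looks like this:

kubectl apply -f toolbox.yaml
# as a DaemonSet, one toolbox pod should be Running on each schedulable node
kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o wide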

Usage

Exec into the container and check the cluster status:

[root@test-61 ~]# kubectl exec -it -n rook-ceph rook-ceph-tools-84d9889d64-wlm6x -- bash
[root@test-62 /]# ceph -s
  cluster:
    id:     546a216f-2c8e-4a9d-acf4-3041857a127a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum a,b,c (age 2w)
    mgr: a(active, since 4h), standbys: b
    mds: 1/1 daemons up, 1 hot standby
    osd: 3 osds: 3 up (since 11d), 3 in (since 2w)

  data:
    volumes: 1/1 healthy
    pools:   4 pools, 81 pgs
    objects: 1.68k objects, 5.0 GiB
    usage:   18 GiB used, 882 GiB / 900 GiB avail
    pgs:     81 active+clean

  io:
    client: 1.2 KiB/s rd, 2.0 KiB/s wr, 2 op/s rd, 0 op/s wr
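
For one-off commands an interactive shell is not needed; any ceph command can be passed straight through kubectl exec (pod name as above):

kubectl -n rook-ceph exec rook-ceph-tools-84d9889d64-wlm6x -- ceph osd status
kubectl -n rook-ceph exec rook-ceph-tools-84d9889d64-wlm6x -- ceph df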

Using RBD

[root@test-62 /]# rbd create replicapool/test --size 10
[root@test-62 /]# rbd info replicapool/test
rbd image 'test':
        size 10 MiB in 3 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 249d8df433415b
        block_name_prefix: rbd_data.249d8df433415b
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Mon Aug 14 02:36:22 2023
        access_timestamp: Mon Aug 14 02:36:22 2023
        modify_timestamp: Mon Aug 14 02:36:22 2023
[root@test-62 /]# rbd feature disable replicapool/test fast-diff deep-flatten object-map exclusive-lock
[root@test-62 /]# rbd map replicapool/test
/dev/rbd2
[root@test-62 /]# lsblk | grep rbd
rbd0 251:0  0   8G 0 disk
rbd1 251:16 0   8G 0 disk
rbd2 251:32 0  10M 0 disk
[root@test-62 /]# mkfs.ext4 -m0 /dev/rbd2 # format on first use only; do NOT do this for an image that already holds data (e.g. the existing rbd0)
mke2fs 1.45.6 (20-Mar-2020)
Suggestion: Use Linux kernel >= 3.18 for improved stability of the metadata and journal checksum features.
Discarding device blocks: done
Creating filesystem with 10240 1k blocks and 2560 inodes
Filesystem UUID: 3e880522-712f-4a5e-ae2e-becc940ee973
Superblock backups stored on blocks:
        8193

Allocating group tables: done
Writing inode tables: done
Creating journal (1024 blocks): done
Writing superblocks and filesystem accounting information: done

[root@test-62 /]# mkdir /tmp/rook-volume
[root@test-62 /]# mount /dev/rbd2 /tmp/rook-volume
[root@test-62 /]# df -h
Filesystem                                Size  Used Avail Use% Mounted on
overlay                                   500G   47G  453G  10% /
tmpfs                                     118G     0  118G   0% /sys/fs/cgroup
devtmpfs                                  118G     0  118G   0% /dev
shm                                        64M     0   64M   0% /dev/shm
/dev/mapper/centos_172--20--50--245-root  295G   58G  238G  20% /usr/lib/modules
/dev/mapper/containerd-testpool           500G   47G  453G  10% /etc/hostname
tmpfs                                     235G  4.0K  235G   1% /var/lib/rook-ceph-mon
tmpfs                                     235G   12K  235G   1% /run/secrets/kubernetes.io/serviceaccount
/dev/rbd2                                 8.7M  172K  8.4M   2% /tmp/rook-volume
[root@test-62 /]# lsblk
NAME                             MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                               11:0    1 1024M  0 rom
rbd0                             251:0    0    8G  0 disk
rbd1                             251:16   0    8G  0 disk
rbd2                             251:32   0   10M  0 disk /tmp/rook-volume
vda                              252:0    0  300G  0 disk
|-vda1                           252:1    0    1G  0 part
`-vda2                           252:2    0  299G  0 part
  |-centos_172--20--50--245-root 253:0    0  295G  0 lvm  /dev/termination-log
  `-centos_172--20--50--245-swap 253:1    0    4G  0 lvm
vdb                              252:16   0  100G  0 disk
vdc                              252:32   0  500G  0 disk
`-containerd-testpool            253:4    0  500G  0 lvm  /etc/resolv.conf
vdd                              252:48   0    1T  0 disk
`-hadoop-testpool                253:3    0 1024G  0 lvm
vde                              252:64   0  300G  0 disk
vdf                              252:80   0  300G  0 disk
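
The rbd feature disable step above is needed because stock distribution kernels often lack support for features such as object-map, fast-diff, and deep-flatten, and rbd map fails for images with features the kernel does not implement. A sketch that avoids the extra step by enabling only layering at creation time (the image name test2 is hypothetical):

# create the image with only the 'layering' feature so it maps on stock kernels
rbd create replicapool/test2 --size 10 --image-feature layering
rbd map replicapool/test2
rbd showmapped   # list which images are currently mapped to /dev/rbdX devices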

Unmounting and unmapping RBD

[root@test-62 /]# umount /tmp/rook-volume
[root@test-62 /]# rbd unmap /dev/rbd2
[root@test-62 /]# df -h
Filesystem                                Size  Used Avail Use% Mounted on
overlay                                   500G   47G  453G  10% /
tmpfs                                     118G     0  118G   0% /sys/fs/cgroup
devtmpfs                                  118G     0  118G   0% /dev
shm                                        64M     0   64M   0% /dev/shm
/dev/mapper/centos_172--20--50--245-root  295G   58G  238G  20% /usr/lib/modules
/dev/mapper/containerd-testpool           500G   47G  453G  10% /etc/hostname
tmpfs                                     235G  4.0K  235G   1% /var/lib/rook-ceph-mon
tmpfs                                     235G   12K  235G   1% /run/secrets/kubernetes.io/serviceaccount
[root@test-62 /]# lsblk
NAME                             MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                               11:0    1 1024M  0 rom
rbd0                             251:0    0    8G  0 disk
rbd1                             251:16   0    8G  0 disk
vda                              252:0    0  300G  0 disk
|-vda1                           252:1    0    1G  0 part
`-vda2                           252:2    0  299G  0 part
  |-centos_172--20--50--245-root 253:0    0  295G  0 lvm  /dev/termination-log
  `-centos_172--20--50--245-swap 253:1    0    4G  0 lvm
vdb                              252:16   0  100G  0 disk
vdc                              252:32   0  500G  0 disk
`-containerd-testpool            253:4    0  500G  0 lvm  /etc/resolv.conf
vdd                              252:48   0    1T  0 disk
`-hadoop-testpool                253:3    0 1024G  0 lvm
vde                              252:64   0  300G  0 disk
vdf                              252:80   0  300G  0 disk
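
Once unmapped, the test image can be deleted from the pool; a cleanup sketch:

rbd rm replicapool/test
rbd ls replicapool   # confirm the image is gone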

Using CephFS (created in the previous chapter)

[root@test-62 /]# mkdir /tmp/registry

# Detect the mon endpoints and the user secret for the connection
[root@test-62 /]# mon_endpoints=$(grep mon_host /etc/ceph/ceph.conf | awk '{print $3}')
[root@test-62 /]# my_secret=$(grep key /etc/ceph/keyring | awk '{print $3}')

# Mount the filesystem
[root@test-62 /]# mount -t ceph -o mds_namespace=myfs,name=admin,secret=$my_secret $mon_endpoints:/ /tmp/registry
# expands to: mount -t ceph -o mds_namespace=myfs,name=admin,secret=AQAfwsBkNa7vEBAAeyKKOpqGxGKBJ2ar7/HBZg== 172.20.19.62:6789,172.20.19.61:6789,172.20.19.63:6789:/ /tmp/registry

[root@test-62 /]# df -h /tmp/registry
Filesystem  Size  Used Avail Use% Mounted on
overlay     500G   47G  453G  10% /

[root@test-62 /]# umount /tmp/registry
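
If the filesystem name to pass as mds_namespace is unknown, it can be listed from inside the toolbox:

ceph fs ls   # prints each filesystem's name and its metadata/data pools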

For other operations, see the official Ceph documentation.