For background, see the earlier post PV-PVC (StatefulSet example).

Main usage

PVC -> storageClassName -> PV

The PV handles the integration with Ceph: the RBD block image is created automatically, with the plugin driver doing the provisioning. Kubernetes needs a configuration file and credentials to access the Ceph cluster; rook-ceph creates these automatically, so they can be used as-is.
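For a quick check, the credentials Rook generates for the CSI drivers can be listed directly (the `rook-csi-*` secret names are Rook's defaults; `rook-csi-rbd-provisioner` is the one referenced by the snapshot class later on):

```shell
# list the Ceph credentials Rook auto-creates for the CSI drivers
kubectl -n rook-ceph get secret | grep rook-csi
```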
1. Manually create a PV, then bind a PVC to the Pod (not recommended)
Manually create a PV:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 10.68.86.239:6789
      - 10.68.41.247:6789
      - 10.68.19.37:6789
    pool: replicapool
    image: ceph-image
    user: admin
    secretRef:
      name: ceph-secret
    fsType: ext4
    readOnly: false
  persistentVolumeReclaimPolicy: Recycle
```
Create a PVC, which binds a PV dynamically.

pvc-demo.yaml

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
  labels:
    app: pvc-demo
spec:
  storageClassName: rook-ceph-block # specify the storageClassName
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1G
```
pod-with-pvc-demo.yaml

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
spec:
  containers:
    - name: demo
      image: nginx:latest
      imagePullPolicy: IfNotPresent
      ports:
        - name: www
          protocol: TCP
          containerPort: 80
      volumeMounts:
        - name: rbd
          mountPath: /usr/share/nginx/html
  volumes:
    - name: rbd
      persistentVolumeClaim:
        claimName: pvc-demo
```
Verify:

```shell
[root@imwl-01 ~]# kubectl get pv | grep pvc-demo
pvc-ed593fbd-2475-43fd-a946-2e2d5fb0c582   1Gi   RWO   Delete   Bound   default/pvc-demo   rook-ceph-block   61s
[root@imwl-01 ~]# kubectl get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
pvc-demo   Bound    pvc-ed593fbd-2475-43fd-a946-2e2d5fb0c582   1Gi        RWO            rook-ceph-block   64s
[root@imwl-01 ~]# kubectl get pod
NAME       READY   STATUS    RESTARTS   AGE
pod-demo   1/1     Running   0          21m
[root@imwl-01 ~]# kubectl exec -it pod-demo -- ls -l /usr/share/nginx/html # the volume is mounted
total 16
drwx------ 2 root root 16384 Mar 27 08:42 lost+found
```
2. Dynamic provisioning
sts-with-pvc.yaml

```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet # a StatefulSet requires a headless Service to exist first
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www # matches the volumeClaimTemplates entry below
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: rook-ceph-block
```
Verify:

```shell
[root@imwl-01 ceph]# kubectl get pv | grep www
pvc-59ff8d81-1426-49cf-9f79-d267a22f16cf   1Gi   RWO   Delete   Bound   default/www-web-1   rook-ceph-block   61s
pvc-c1afd98d-1df3-4c84-9798-2f193ca07fbc   1Gi   RWO   Delete   Bound   default/www-web-0   rook-ceph-block   72s
pvc-edbc09cb-3106-4e10-8d1f-33f0768fb6a8   1Gi   RWO   Delete   Bound   default/www-web-2   rook-ceph-block   29s
[root@imwl-01 ceph]# kubectl get pvc | grep web
www-web-0   Bound   pvc-c1afd98d-1df3-4c84-9798-2f193ca07fbc   1Gi   RWO   rook-ceph-block   82s
www-web-1   Bound   pvc-59ff8d81-1426-49cf-9f79-d267a22f16cf   1Gi   RWO   rook-ceph-block   70s
www-web-2   Bound   pvc-edbc09cb-3106-4e10-8d1f-33f0768fb6a8   1Gi   RWO   rook-ceph-block   38s
[root@imwl-01 ceph]# kubectl get pod | grep web
web-0   1/1   Running   0   92s
web-1   1/1   Running   0   80s
web-2   1/1   Running   0   48s
```
Block storage usage

The sections above cover how to use block storage.

Shared filesystem usage

A shared filesystem can be mounted on multiple machines or Pods at the same time.
Deploy filesystem.yaml
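A minimal CephFilesystem manifest in the spirit of Rook's stock filesystem.yaml example (the filesystem name `myfs` and the pool layout here are illustrative assumptions):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs            # assumed name; pick your own
  namespace: rook-ceph  # namespace:cluster
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - name: replicated
      replicated:
        size: 3
  metadataServer:       # the MDS pods verified below
    activeCount: 1
    activeStandby: true
```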
Mount into a Linux directory /mnt/ceph

Run the following on each host; the files under /mnt/ceph are then shared between them:

```shell
mkdir /mnt/ceph
mon_endpoints=$(kubectl exec -it $(kubectl get pod -n rook-ceph -l app=rook-ceph-tools -o=jsonpath='{.items[0].metadata.name}') -n rook-ceph -- grep mon_host /etc/ceph/ceph.conf | awk '{print $3}'|sed -e 's/\r//g')
my_secret=$(kubectl exec -it $(kubectl get pod -n rook-ceph -l app=rook-ceph-tools -o=jsonpath='{.items[0].metadata.name}') -n rook-ceph -- grep key /etc/ceph/keyring | awk '{print $3}')
mount -t ceph ${mon_endpoints}:/ /mnt/ceph -o name=admin,secret=$my_secret
## assuming there is a subdirectory direct_mount:
## mount -t ceph ${mon_endpoints}:/direct_mount /mnt/ceph -o name=admin,secret=$my_secret
## client-side mount: write the config files to /etc/ceph first
## yum install -y ceph-fuse
## ceph-fuse -m 192.168.2.133:6789,192.168.2.132:6789,192.168.2.131:6789 -r /direct_mount /mnt/vm1new
# unmount
umount -lf /mnt/ceph
```
Deploy storageClass.yaml, which uses the filesystem created above
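A sketch of the CephFS StorageClass, following Rook's stock storageclass.yaml (the provisioner, name, reclaim policy, and expansion flag match the `kubectl get storageclasses` listing later in this post; `fsName`/`pool` assume the `myfs` filesystem above):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
  clusterID: rook-ceph # namespace:cluster
  fsName: myfs             # assumed: the CephFilesystem created above
  pool: myfs-replicated    # assumed: <fsName>-<dataPool name>
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
allowVolumeExpansion: true
```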
Verify:

```shell
[root@imwl-175 ~]# kubectl -n rook-ceph get pod -l app=rook-ceph-mds
```
Using the shared filesystem storage

fs-deploy.yaml

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deployment
spec:
  selector:
    matchLabels:
      app: myapp
  replicas: 3
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
        - name: myapp
          image: imwl/myapp:v1
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"
          volumeMounts:
            - name: cephfs-pvc
              mountPath: /tmp
      volumes:
        - name: cephfs-pvc
          persistentVolumeClaim:
            claimName: cephfs-pvc
            readOnly: false
```
After applying:

```shell
[root@imwl-01 ceph]# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
myapp-deployment-79c9bf589c-hgkrk   1/1     Running   0          21m
myapp-deployment-79c9bf589c-hm856   1/1     Running   0          21m
myapp-deployment-79c9bf589c-lbtck   1/1     Running   0          21m
[root@imwl-01 ceph]# kubectl get pvc
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
cephfs-pvc   Bound    pvc-929a3293-4bc5-46f6-be7e-782fbf822149   1Gi        RWX            rook-cephfs    22m
[root@imwl-01 ceph]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS   REASON   AGE
pvc-929a3293-4bc5-46f6-be7e-782fbf822149   1Gi        RWX            Delete           Bound    default/cephfs-pvc   rook-cephfs             22m
```
Write a file in one Pod, and the same file shows up in the other Pods:

```shell
[root@imwl-01 ceph]# kubectl exec -it myapp-deployment-79c9bf589c-hgkrk -- sh
```
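For example (a sketch using the Pod names above; `/tmp` is the CephFS mount path in this Deployment):

```shell
# write a file from one replica
kubectl exec -it myapp-deployment-79c9bf589c-hgkrk -- sh -c 'echo 1 > /tmp/1.txt'
# read it back from another replica -- the CephFS volume is shared
kubectl exec -it myapp-deployment-79c9bf589c-hm856 -- cat /tmp/1.txt
```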
Object storage

Setup
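A minimal CephObjectStore manifest along the lines of Rook's stock object.yaml (the store name `my-store` and gateway port 80 match the Services below; the pool layout is an illustrative assumption):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: rook-ceph # namespace:cluster
spec:
  metadataPool:
    replicated:
      size: 3
  dataPool:
    replicated:
      size: 3
  preservePoolsOnDelete: false
  gateway:
    port: 80       # referenced by the external Service below
    instances: 1
```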
Expose it outside the cluster:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: rook-ceph-rgw-my-store-external
  namespace: rook-ceph # namespace:cluster
  labels:
    app: rook-ceph-rgw
    rook_cluster: rook-ceph # namespace:cluster
    rook_object_store: my-store
spec:
  ports:
    - name: rgw
      port: 80 # service port mentioned in object store crd
      protocol: TCP
      targetPort: 80 # matches the gateway port in the CephObjectStore
      nodePort: 31088
  selector:
    app: rook-ceph-rgw
    rook_cluster: rook-ceph # namespace:cluster
    rook_object_store: my-store
  sessionAffinity: None
  type: NodePort
```
Create a StorageClass for consuming buckets
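A sketch matching the `rook-ceph-delete-bucket` class seen in the listing below (provisioner and reclaim policy are taken from that listing; the parameters follow Rook's bucket StorageClass example):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-delete-bucket
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
parameters:
  objectStoreName: my-store
  objectStoreNamespace: rook-ceph
```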
Request a bucket from that StorageClass through an ObjectBucketClaim; this creates a bucket named with the ceph-bkt prefix.
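A sketch of the ObjectBucketClaim (the claim name `ceph-delete-bucket` and the `ceph-bkt` prefix match the resources listed below):

```yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: ceph-delete-bucket
spec:
  generateBucketName: ceph-bkt   # buckets get this prefix plus a UUID
  storageClassName: rook-ceph-delete-bucket
```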
Check:

```shell
[root@imwl-01 ceph]# kubectl -n rook-ceph get pod -l app=rook-ceph-rgw
NAME                                        READY   STATUS    RESTARTS   AGE
rook-ceph-rgw-my-store-a-74d58648b8-5r9r2   2/2     Running   0          2m27s
[root@imwl-01 ceph]# kubectl -n rook-ceph get svc -l app=rook-ceph-rgw
NAME                              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
rook-ceph-rgw-my-store            ClusterIP   10.68.25.246   <none>        80/TCP         14m
rook-ceph-rgw-my-store-external   NodePort    10.68.15.54    <none>        80:31088/TCP   4m20s
[root@imwl-01 ceph]# kubectl get storageclasses.storage.k8s.io
NAME                      PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
rook-ceph-block           rook-ceph.rbd.csi.ceph.com      Delete          Immediate           true                   26h
rook-ceph-block-retain    rook-ceph.rbd.csi.ceph.com      Retain          Immediate           true                   26h
rook-ceph-delete-bucket   rook-ceph.ceph.rook.io/bucket   Delete          Immediate           false                  105s
rook-cephfs               rook-ceph.cephfs.csi.ceph.com   Delete          Immediate           true                   26h
[root@imwl-01 ceph]# kubectl get objectbucketclaims.objectbucket.io
NAME                 AGE
ceph-delete-bucket   108s
[root@imwl-01 ceph]# radosgw-admin bucket list
[
    "ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7"
]
```
Configure s3cmd

Get the connection details:

```shell
[root@imwl-01 ~]# kubectl get configmaps ceph-delete-bucket -o jsonpath='{.data.BUCKET_HOST}'
rook-ceph-rgw-my-store.rook-ceph.svc
[root@imwl-01 ~]# kubectl get secrets ceph-delete-bucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d
GX61WLYLE0Y1UCKECUQR
[root@imwl-01 ~]# kubectl get secrets ceph-delete-bucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d
aTJuDL2ZtrHfoQrMoidIv6Q8FT7LoA8ZBA4SbIOA
```
From outside the cluster, replace rook-ceph-rgw-my-store.rook-ceph.svc:80 with <node-ip>:31088.

```shell
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd --configure

Enter new values or accept defaults in brackets with Enter.
Refer to user manual for detailed description of all options.

Access key and Secret key are your identifiers for Amazon S3. Leave them empty for using the env variables.
Access Key: GX61WLYLE0Y1UCKECUQR
Secret Key: aTJuDL2ZtrHfoQrMoidIv6Q8FT7LoA8ZBA4SbIOA
Default Region [US]:

Use "s3.amazonaws.com" for S3 Endpoint and not modify it to the target Amazon S3.
S3 Endpoint [s3.amazonaws.com]: rook-ceph-rgw-my-store.rook-ceph.svc:80

Use "%(bucket)s.s3.amazonaws.com" to the target Amazon S3. "%(bucket)s" and "%(location)s" vars can be used
if the target S3 system supports dns based buckets.
DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]: rook-ceph-rgw-my-store.rook-ceph.svc:80/%(bucket)

Encryption password is used to protect your files from reading
by unauthorized persons while in transfer to S3
Encryption password:
Path to GPG program [/usr/bin/gpg]:

When using secure HTTPS protocol all communication with Amazon S3
servers is protected from 3rd party eavesdropping. This method is
slower than plain HTTP, and can only be proxied with Python 2.7 or newer
Use HTTPS protocol [Yes]: no

On some networks all internet access must go through a HTTP proxy.
Try setting it here if you can't connect to S3 directly
HTTP Proxy server name:

New settings:
  Access Key: GX61WLYLE0Y1UCKECUQR
  Secret Key: aTJuDL2ZtrHfoQrMoidIv6Q8FT7LoA8ZBA4SbIOA
  Default Region: US
  S3 Endpoint: rook-ceph-rgw-my-store.rook-ceph.svc:80
  DNS-style bucket+hostname:port template for accessing a bucket: rook-ceph-rgw-my-store.rook-ceph.svc:80/%(bucket)
  Encryption password:
  Path to GPG program: /usr/bin/gpg
  Use HTTPS protocol: False
  HTTP Proxy server name:
  HTTP Proxy server port: 0

Test access with supplied credentials? [Y/n] y
Please wait, attempting to list all buckets...
Success. Your access key and secret key worked fine :-)

Now verifying that encryption works...
Not configured. Never mind.

Save settings? [y/N] y
Configuration saved to '/root/.s3cfg'
```
Usage:

```shell
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd ls
2023-03-27 09:59  s3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd put /etc/passwd* s3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7
upload: '/etc/passwd' -> 's3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd'  [1 of 2]
 1295 of 1295   100% in    0s    91.34 KB/s  done
upload: '/etc/passwd-' -> 's3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd-'  [2 of 2]
 1231 of 1231   100% in    0s    23.42 KB/s  done
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd get s3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd ./
download: 's3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd' -> './passwd'  [1 of 1]
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd rm s3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd
delete: 's3://ceph-bkt-1afbdd70-2a65-4aad-9e7a-8c7b3bd5acb7/passwd'
```
The default user has very limited permissions:

```shell
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd mb s3://test
ERROR: S3 error: 400 (TooManyBuckets)
```
Create a user
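A sketch of the CephObjectStoreUser, following Rook's object-user.yaml example (the name `my-user` and store `my-store` match the Secret shown below; the display name is illustrative):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStoreUser
metadata:
  name: my-user
  namespace: rook-ceph # namespace:cluster
spec:
  store: my-store
  displayName: "my display name"  # illustrative
```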
Get the new credentials and update ~/.s3cfg:

```shell
[root@imwl-01 ~]# kubectl -n rook-ceph get secrets rook-ceph-object-user-my-store-my-user
NAME                                     TYPE                 DATA   AGE
rook-ceph-object-user-my-store-my-user   kubernetes.io/rook   3      42s
[root@imwl-01 ~]# kubectl -n rook-ceph get secrets rook-ceph-object-user-my-store-my-user -o yaml
apiVersion: v1
data:
  AccessKey: MVlJSzBERkhZTURLWU85RTFSS1c=
  Endpoint: aHR0cDovL3Jvb2stY2VwaC1yZ3ctbXktc3RvcmUucm9vay1jZXBoLnN2Yzo4MA==
  SecretKey: b2NHV2N3WHlEMEdlRE4zSUFsWmFtSVl2dDJlVFFRNmxXa0ZVdzJjbg==
kind: Secret
metadata:
  creationTimestamp: "2023-03-27T10:43:23Z"
  labels:
    app: rook-ceph-rgw
    rook_cluster: rook-ceph
    rook_object_store: my-store
    user: my-user
  name: rook-ceph-object-user-my-store-my-user
  namespace: rook-ceph
  ownerReferences:
  - apiVersion: ceph.rook.io/v1
    blockOwnerDeletion: true
    controller: true
    kind: CephObjectStoreUser
    name: my-user
    uid: d940de90-3ec2-4006-a565-cb38751b8a88
  resourceVersion: "247646"
  uid: 7523f966-1e4a-4b88-9ea4-74a157e2d0c8
type: kubernetes.io/rook
```
Verify:

```shell
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd mb s3://test
Bucket 's3://test/' created
[root@rook-ceph-tools-8558bfc844-9rmjm /]# s3cmd ls
2023-03-28 02:57  s3://test
```
Snapshots

Upstream: https://github.com/kubernetes-csi/external-snapshotter

Install the CRDs and the controller; here the controller image is changed to imwl/snapshot-controller:v6.2.1.

```shell
# kubectl apply -k client/config/crd/
```
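The controller comes from the same repo (a sketch assuming the stock kustomization path; patch the image to imwl/snapshot-controller:v6.2.1 before applying):

```shell
# install the snapshot controller (its manifests target kube-system)
kubectl apply -k deploy/kubernetes/snapshot-controller/
```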
Check the API versions:

```shell
# kubectl api-versions
snapshot.storage.k8s.io/v1
```
Check the created Pods:

```shell
[root@imwl-01 external-snapshotter-master]# kubectl get pods -n kube-system -l app=snapshot-controller
NAME                                   READY   STATUS    RESTARTS   AGE
snapshot-controller-5899869978-4xwxq   1/1     Running   0          51m
snapshot-controller-5899869978-95nth   1/1     Running   0          52m
```
Create an RBD snapshot class

Create a VolumeSnapshotClass:

```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
driver: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
parameters:
  # Specify a string that identifies your cluster. Ceph CSI supports any
  # unique string. When Ceph CSI is deployed by Rook use the Rook namespace,
  # for example "rook-ceph".
  clusterID: rook-ceph # namespace:cluster
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete
## volumesnapshotclass.snapshot.storage.k8s.io/csi-rbdplugin-snapclass created
```
Using VolumeSnapshot

A backup of a Postgres database: namespace postgres-operator, PVC test-pgha1-p9qd-pgdata.

```yaml
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: rbd-pvc-snapshot
  namespace: postgres-operator
spec:
  volumeSnapshotClassName: csi-rbdplugin-snapclass
  source:
    persistentVolumeClaimName: test-pgha1-p9qd-pgdata
# volumesnapshot.snapshot.storage.k8s.io/rbd-pvc-snapshot created
```
When READYTOUSE is true, the snapshot succeeded; when false, it needs investigation. A problem hit earlier was a wrong namespace.

```shell
[root@imwl-01 ~]# kubectl get volumesnapshots.snapshot.storage.k8s.io -A
NAMESPACE           NAME               READYTOUSE   SOURCEPVC                SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS             SNAPSHOTCONTENT                                    CREATIONTIME   AGE
postgres-operator   rbd-pvc-snapshot   true         test-pgha1-p9qd-pgdata                           20Gi          csi-rbdplugin-snapclass   snapcontent-5408b1d0-8c80-4196-b655-51d29e7d695a   7m52s          7m52s
```
Create a PVC from the snapshot and use it
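A sketch of restoring the snapshot into a new PVC and mounting it (the PVC name `rbd-pvc-restore`, size, and StorageClass match the verification output below; `csirbd-demo-pod` is the Pod exec'd into later):

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-restore
  namespace: postgres-operator
spec:
  storageClassName: rook-ceph-block
  dataSource:
    name: rbd-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi # must be at least the snapshot's restore size
---
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-demo-pod
  namespace: postgres-operator
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /mnt
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc-restore
```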
You can also clone directly:

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: postgres-operator
  name: rbd-pvc-clone
spec:
  storageClassName: rook-ceph-block
  dataSource:
    name: test-pgha1-p9qd-pgdata
    kind: PersistentVolumeClaim # dataSource defines the source of the cloned PVC
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi # the clone must be at least as large as the source PVC
---
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-demo-pod2
  namespace: postgres-operator
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /mnt
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc-clone
        readOnly: false
```
After creating these, verify that the database data is present:

```shell
[root@imwl-01 ~]# kubectl get pvc -n postgres-operator
NAME                     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS             AGE
test-pgha1-99kr-pgdata   Bound    pvc-413310df-f1b0-4c28-9c1d-3ecacad20dbe   20Gi       RWO            rook-ceph-block-retain   32m
test-pgha1-p9qd-pgdata   Bound    pvc-a1d3521a-ded6-4e2d-97b1-d4e34f94fae6   20Gi       RWO            rook-ceph-block-retain   32m
test-repo1               Bound    pvc-5670d2fd-ed04-448a-9611-c418098d132a   40Gi       RWO            rook-ceph-block-retain   32m
rbd-pvc-restore          Bound    pvc-5c0af5a1-37d3-468d-bd45-331b7aa3695f   20Gi       RWO            rook-ceph-block          22m
rbd-pvc-clone            Bound    pvc-d24d021c-a841-49a5-a4d4-f2ae67e6913e   20Gi       RWO            rook-ceph-block          20s
[root@imwl-01 ~]# kubectl exec -it -n postgres-operator csirbd-demo-pod -- bash # from the snapshot
root@csirbd-demo-pod:/# ls -l /mnt/
total 28
drwxrws---  2 root tape 16384 Mar 29 10:23 lost+found
drwx--S--- 19   26 tape  4096 Mar 29 10:23 pg13
drwx------  3   26 tape  4096 Mar 29 10:24 pg13_wal
drwxr-sr-x  3   26 tape  4096 Mar 29 10:23 pgbackrest
[root@imwl-01 ~]# kubectl exec -it -n postgres-operator csirbd-demo-pod2 -- bash # from the clone
root@csirbd-demo-pod2:/# ls -l mnt/
total 28
drwxrws---  2 root tape 16384 Mar 29 10:23 lost+found
drwx--S--- 19   26 tape  4096 Mar 29 16:00 pg13
drwx------  3   26 tape  4096 Mar 29 17:05 pg13_wal
drwxr-sr-x  3   26 tape  4096 Mar 29 10:23 pgbackrest
```
A CephFS snapshot class can be created the same way (likewise not particularly recommended).

Create a VolumeSnapshotClass:

```yaml
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-cephfsplugin-snapclass
driver: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
parameters:
  clusterID: rook-ceph # namespace:cluster
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete
```
Create a snapshot against the VolumeSnapshotClass with a VolumeSnapshot resource.
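A sketch of the VolumeSnapshot (the source PVC and class names match the output below):

```yaml
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: cephfs-pvc-snapshot
spec:
  volumeSnapshotClassName: csi-cephfsplugin-snapclass
  source:
    persistentVolumeClaimName: cephfs-pvc
```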
After applying:

```shell
[root@imwl-01 ~]# kubectl get volumesnapshot
NAME                  READYTOUSE   SOURCEPVC    SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS                SNAPSHOTCONTENT                                    CREATIONTIME   AGE
cephfs-pvc-snapshot   true         cephfs-pvc                           1Gi           csi-cephfsplugin-snapclass   snapcontent-dc1adc4a-aabd-4efc-9918-200b2d01dc25   55s            55s
```
Usage
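A sketch that restores the snapshot into a new PVC and mounts it (the name `cephfs-pvc-clone`, size, access mode, and StorageClass match the verification below; `csifs-clone-pod` is the Pod exec'd into):

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc-clone
spec:
  storageClassName: rook-cephfs
  dataSource:
    name: cephfs-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: csifs-clone-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /mnt
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: cephfs-pvc-clone
```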
Verify:

```shell
[root@imwl-01 ~]# kubectl exec -it csifs-clone-pod -- sh # contents match
# ls /mnt
1.txt
# cat /mnt/1.txt
1
[root@imwl-01 ~]# kubectl get pvc
NAME               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
cephfs-pvc         Bound    pvc-929a3293-4bc5-46f6-be7e-782fbf822149   1Gi        RWX            rook-cephfs    67m
cephfs-pvc-clone   Bound    pvc-123d95f8-414d-4cec-ae77-1671ba897917   1Gi        RWX            rook-cephfs    2m53s
[root@imwl-01 ~]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                      STORAGECLASS   REASON   AGE
pvc-123d95f8-414d-4cec-ae77-1671ba897917   1Gi        RWX            Delete           Bound    default/cephfs-pvc-clone   rook-cephfs             2m55s
pvc-929a3293-4bc5-46f6-be7e-782fbf822149   1Gi        RWX            Delete           Bound    default/cephfs-pvc         rook-cephfs             67m
```