K8s Cloud Native: Rook-Ceph PVC Snapshot and Restore (Part 2)

Published: 2025-08-02

Author: Unstopabler

PVC Snapshot Creation and Restore

1. Using a PVC in a Pod

First, create a sample PVC. The PVC manifest is as follows:

[root@k8s-master rbd]# cat pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
[root@k8s-master rbd]# kubectl apply -f pvc.yaml

Then create a Pod that mounts the PVC named rbd-pvc, as follows:

[root@k8s-master rbd]# cat pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx:latest
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
[root@k8s-master rbd]#
[root@k8s-master rbd]# kubectl apply -f pod.yaml

Check that the Pod was created:

[root@k8s-master rbd]# kubectl get pod
NAME                     READY   STATUS    RESTARTS      AGE
csirbd-demo-pod          1/1     Running   1 (16h ago)   17h
[root@k8s-master rbd]#

Check the PVC status:

[root@k8s-master rbd]# kubectl get pvc
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
rbd-pvc   Bound    pvc-c3f84e95-1b63-44f7-bbaa-02ac4e05c064   1Gi        RWO            rook-ceph-block   17h
[root@k8s-master rbd]#
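If the PVC were to stay in Pending instead of Bound, the usual first checks are the StorageClass and the Ceph CSI RBD driver pods. A minimal sanity-check sketch, assuming the standard Rook example names (rook-ceph namespace, rook-ceph-block StorageClass):

# Confirm the StorageClass referenced by the claim exists
kubectl get storageclass rook-ceph-block

# Confirm the Ceph CSI RBD driver pods are running (default Rook deployment)
kubectl -n rook-ceph get pods | grep csi-rbdplugin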

2. Creating Data Inside the Pod

[root@k8s-master rbd]# kubectl exec -it csirbd-demo-pod -- bash
root@csirbd-demo-pod:/# df -h
Filesystem      Size  Used Avail Use% Mounted on
overlay          17G  5.0G   13G  30% /
tmpfs            64M     0   64M   0% /dev
tmpfs           2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sda2        17G  5.0G   13G  30% /etc/hosts
shm              64M     0   64M   0% /dev/shm
/dev/rbd0       974M   28K  958M   1% /var/lib/www/html
tmpfs           3.8G   12K  3.8G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs           2.0G     0  2.0G   0% /proc/acpi
tmpfs           2.0G     0  2.0G   0% /proc/scsi
tmpfs           2.0G     0  2.0G   0% /sys/firmware
root@csirbd-demo-pod:/# echo "hello rbd" > /var/lib/www/html/index1.html
root@csirbd-demo-pod:/# cat /var/lib/www/html/index1.html
hello rbd
root@csirbd-demo-pod:/#
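Optionally, record a checksum of the test file now so the restored copy can be compared later. This assumes md5sum is available in the container, which holds for the Debian-based official nginx image:

# Checksum of the file that will be captured by the snapshot
kubectl exec csirbd-demo-pod -- md5sum /var/lib/www/html/index1.html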

3. Creating a Snapshot of rbd-pvc

[root@node1 rbd]# cat snapshot.yaml
---
# 1.17 <= K8s <= v1.19
# apiVersion: snapshot.storage.k8s.io/v1beta1
# K8s >= v1.20
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: rbd-pvc-snapshot
spec:
  volumeSnapshotClassName: csi-rbdplugin-snapclass
  source:
    persistentVolumeClaimName: rbd-pvc
[root@node1 rbd]#

[root@k8s-master rbd]# kubectl apply -f snapshot.yaml
volumesnapshot.snapshot.storage.k8s.io/rbd-pvc-snapshot created
[root@k8s-master rbd]#
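The snapshot manifest references a VolumeSnapshotClass named csi-rbdplugin-snapclass, which must already exist in the cluster. For reference, a typical definition for the Rook RBD CSI driver looks roughly like the sketch below; the clusterID and secret names follow the standard Rook examples and may differ in your deployment:

---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
# driver must match the provisioner of the RBD StorageClass
driver: rook-ceph.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete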

Verify that the snapshot of the rbd-pvc volume was created successfully:

[root@k8s-master rbd]# kubectl get volumesnapshot
NAME               READYTOUSE   SOURCEPVC   SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS             SNAPSHOTCONTENT                                    CREATIONTIME   AGE
rbd-pvc-snapshot   true         rbd-pvc                             1Gi           csi-rbdplugin-snapclass   snapcontent-e6ba28ed-990f-4cdd-9bfa-a7d6ea19e655   29s            27s
[root@k8s-master rbd]#
[root@k8s-master rbd]# kubectl get volumesnapshotcontents.snapshot.storage.k8s.io
NAME                                               READYTOUSE   RESTORESIZE   DELETIONPOLICY   DRIVER                       VOLUMESNAPSHOTCLASS       VOLUMESNAPSHOT     VOLUMESNAPSHOTNAMESPACE   AGE
snapcontent-e6ba28ed-990f-4cdd-9bfa-a7d6ea19e655   true         1073741824    Delete           rook-ceph.rbd.csi.ceph.com   csi-rbdplugin-snapclass   rbd-pvc-snapshot   default                   91s
[root@k8s-master rbd]#
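If the standard rook-ceph-tools toolbox Deployment is running, the snapshot can also be inspected from the Ceph side. The pool name below (replicapool) is the one from the Rook block-pool example and is only illustrative; depending on the Ceph CSI version, the snapshot shows up as a snapshot on the source image and/or as an additional csi-snap-* image in the pool:

# List RBD images in the pool backing the StorageClass (pool name is illustrative)
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- rbd ls replicapool

# Then list snapshots of one of the images reported above, e.g.:
# kubectl -n rook-ceph exec deploy/rook-ceph-tools -- rbd snap ls replicapool/csi-vol-<uuid>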

4. Accidentally Deleting Data in the Pod Container

[root@k8s-master rbd]# kubectl exec -it csirbd-demo-pod -- bash
root@csirbd-demo-pod:/# ls /var/lib/www/html/
index.html  index1.html  lost+found
root@csirbd-demo-pod:/# rm -fr /var/lib/www/html/index*
root@csirbd-demo-pod:/# ls /var/lib/www/html/
lost+found
root@csirbd-demo-pod:/#

5. Restoring the PVC from the Snapshot

When the data on the RBD block device in the container is lost or deleted by mistake, the snapshot becomes critical: the data can be recovered by creating a new PVC whose dataSource references the VolumeSnapshot.

[root@node1 rbd]# cat pvc-restore.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc-restore
spec:
  storageClassName: rook-ceph-block
  dataSource:
    name: rbd-pvc-snapshot
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
[root@node1 rbd]#
[root@k8s-master rbd]# kubectl apply -f pvc-restore.yaml
persistentvolumeclaim/rbd-pvc-restore created
[root@k8s-master rbd]#


[root@k8s-master rbd]# kubectl get pvc
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      AGE
rbd-pvc           Bound    pvc-c3f84e95-1b63-44f7-bbaa-02ac4e05c064   1Gi        RWO            rook-ceph-block   17h
rbd-pvc-restore   Bound    pvc-bf601bee-b8f8-437c-9b56-bf7fc7f32bb4   1Gi        RWO            rook-ceph-block   8s
[root@k8s-master rbd]#
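Note that the storage request on the restore claim must be at least the snapshot's RESTORESIZE (1Gi here), and the new PVC records a DataSource pointing back to the VolumeSnapshot. Both can be confirmed with a quick check such as:

# Restore size recorded in the snapshot status
kubectl get volumesnapshot rbd-pvc-snapshot -o jsonpath='{.status.restoreSize}{"\n"}'

# DataSource recorded on the restored claim
kubectl describe pvc rbd-pvc-restore | grep -i -A3 datasource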

6. Mounting the Restored PVC in a Pod to Verify the Data

A Pod that mounts the restored PVC sees the data captured in the snapshot:

[root@k8s-master rbd]# cat pod-pvc-restore.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: csirbd-restore-pod
spec:
  containers:
    - name: web-server
      image: nginx:latest
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc-restore
        readOnly: false
[root@k8s-master rbd]# kubectl apply -f pod-pvc-restore.yaml
pod/csirbd-restore-pod created
[root@k8s-master rbd]# kubectl get pod -owide | grep csirbd-restore-pod
csirbd-restore-pod       1/1     Running   0             66s   10.244.2.81   k8s-node2   <none>           <none>
[root@k8s-master rbd]#

Enter the container and verify the data:

[root@k8s-master rbd]# kubectl exec -it csirbd-restore-pod -- df -h
Filesystem      Size  Used Avail Use% Mounted on
overlay          17G  4.9G   13G  29% /
tmpfs            64M     0   64M   0% /dev
tmpfs           2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sdb2        17G  4.9G   13G  29% /etc/hosts
shm              64M     0   64M   0% /dev/shm
/dev/rbd0       974M   32K  958M   1% /var/lib/www/html
tmpfs           3.8G   12K  3.8G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs           2.0G     0  2.0G   0% /proc/acpi
tmpfs           2.0G     0  2.0G   0% /proc/scsi
tmpfs           2.0G     0  2.0G   0% /sys/firmware
[root@k8s-master rbd]# kubectl exec -it csirbd-restore-pod -- ls /var/lib/www/html
index.html  index1.html  lost+found
[root@k8s-master rbd]#
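Once the restored data has been verified, the test resources can be cleaned up. A cleanup sketch (the VolumeSnapshotContent deletionPolicy shown earlier is Delete, so removing the VolumeSnapshot also removes the backing snapshot on the Ceph side):

kubectl delete pod csirbd-restore-pod csirbd-demo-pod
kubectl delete volumesnapshot rbd-pvc-snapshot
kubectl delete pvc rbd-pvc-restore rbd-pvc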


