From 32f0de090381541ce16e326d89f5bb4cb0969d88 Mon Sep 17 00:00:00 2001 From: Bala Harish Date: Thu, 29 Jan 2026 13:52:11 +0530 Subject: [PATCH] ci/docs: Create New Document for Cloudnative PG Backups Signed-off-by: Bala Harish --- .../version-4.4.x.json | 106 +++++ .../cloudnative-pg-backups.md | 414 ++++++++++++++++++ .../backup-and-restore/kubevirt-backup.md | 135 +++++- docs/sidebars.js | 6 + 4 files changed, 655 insertions(+), 6 deletions(-) create mode 100644 docs/i18n/en/docusaurus-plugin-content-docs/version-4.4.x.json create mode 100644 docs/main/Solutioning/backup-and-restore/cloudnative-pg-backups.md diff --git a/docs/i18n/en/docusaurus-plugin-content-docs/version-4.4.x.json b/docs/i18n/en/docusaurus-plugin-content-docs/version-4.4.x.json new file mode 100644 index 000000000..b5756ecf3 --- /dev/null +++ b/docs/i18n/en/docusaurus-plugin-content-docs/version-4.4.x.json @@ -0,0 +1,106 @@ +{ + "version.label": { + "message": "4.4.x", + "description": "The label for version 4.4.x" + }, + "sidebar.docs.category.OpenEBS Documentation": { + "message": "OpenEBS Documentation", + "description": "The label for category OpenEBS Documentation in sidebar docs" + }, + "sidebar.docs.category.Concepts": { + "message": "Concepts", + "description": "The label for category Concepts in sidebar docs" + }, + "sidebar.docs.category.Data Engines": { + "message": "Data Engines", + "description": "The label for category Data Engines in sidebar docs" + }, + "sidebar.docs.category.Quickstart Guide": { + "message": "Quickstart Guide", + "description": "The label for category Quickstart Guide in sidebar docs" + }, + "sidebar.docs.category.User Guides": { + "message": "User Guides", + "description": "The label for category User Guides in sidebar docs" + }, + "sidebar.docs.category.Local Storage": { + "message": "Local Storage", + "description": "The label for category Local Storage in sidebar docs" + }, + "sidebar.docs.category.Local PV Hostpath": { + "message": "Local PV Hostpath", + "description": "The label for category Local PV Hostpath in sidebar docs" + }, + "sidebar.docs.category.Configuration": { + "message": "Configuration", + "description": "The label for category Configuration in sidebar docs" + }, + "sidebar.docs.category.Advanced Operations": { + "message": "Advanced Operations", + "description": "The label for category Advanced Operations in sidebar docs" + }, + "sidebar.docs.category.XFS Quota": { + "message": "XFS Quota", + "description": "The label for category XFS Quota in sidebar docs" + }, + "sidebar.docs.category.Local PV LVM": { + "message": "Local PV LVM", + "description": "The label for category Local PV LVM in sidebar docs" + }, + "sidebar.docs.category.Local PV ZFS": { + "message": "Local PV ZFS", + "description": "The label for category Local PV ZFS in sidebar docs" + }, + "sidebar.docs.category.Additional Information": { + "message": "Additional Information", + "description": "The label for category Additional Information in sidebar docs" + }, + "sidebar.docs.category.Replicated Storage": { + "message": "Replicated Storage", + "description": "The label for category Replicated Storage in sidebar docs" + }, + "sidebar.docs.category.Replicated PV Mayastor": { + "message": "Replicated PV Mayastor", + "description": "The label for category Replicated PV Mayastor in sidebar docs" + }, + "sidebar.docs.category.Data Migration": { + "message": "Data Migration", + "description": "The label for category Data Migration in sidebar docs" + }, + "sidebar.docs.category.Migration using Velero": { + 
"message": "Migration using Velero", + "description": "The label for category Migration using Velero in sidebar docs" + }, + "sidebar.docs.category.Migration for Distributed DB": { + "message": "Migration for Distributed DB", + "description": "The label for category Migration for Distributed DB in sidebar docs" + }, + "sidebar.docs.category.Migration for Replicated DB": { + "message": "Migration for Replicated DB", + "description": "The label for category Migration for Replicated DB in sidebar docs" + }, + "sidebar.docs.category.Solutioning": { + "message": "Solutioning", + "description": "The label for category Solutioning in sidebar docs" + }, + "sidebar.docs.category.OpenEBS on K8s Platforms": { + "message": "OpenEBS on K8s Platforms", + "description": "The label for category OpenEBS on K8s Platforms in sidebar docs" + }, + "sidebar.docs.category.Read-Write-Many": { + "message": "Read-Write-Many", + "description": "The label for category Read-Write-Many in sidebar docs" + }, + "sidebar.docs.category.Backup and Restore": { + "message": "Backup and Restore", + "description": "The label for category Backup and Restore in sidebar docs" + }, + "sidebar.docs.category.Troubleshooting": { + "message": "Troubleshooting", + "description": "The label for category Troubleshooting in sidebar docs" + }, + "sidebar.docs.category.Support": { + "message": "Support", + "description": "The label for category Support in sidebar docs" + } +} \ No newline at end of file diff --git a/docs/main/Solutioning/backup-and-restore/cloudnative-pg-backups.md b/docs/main/Solutioning/backup-and-restore/cloudnative-pg-backups.md new file mode 100644 index 000000000..a55a4a107 --- /dev/null +++ b/docs/main/Solutioning/backup-and-restore/cloudnative-pg-backups.md @@ -0,0 +1,414 @@ +--- +id: cloudnative-backup +title: OpenEBS VolumeSnapshots for CloudNativePG Backups +keywords: + - OpenEBS VolumeSnapshots for CloudNativePG Backups + - VolumeSnapshots for CloudNativePG Backups + - CloudNativePG Backups +description: In this document, you learn about how to use OpenEBS VolumeSnapshots to back up and restore PostgreSQL databases. +--- + +## Overview + +As PostgreSQL deployments scale in Kubernetes environments, traditional logical backups such as `pg_dump` can become increasingly time-consuming and resource-intensive. These approaches often struggle to meet recovery time objectives (RTOs) for large or performance-sensitive databases. + +OpenEBS VolumeSnapshots offer a more efficient alternative by capturing the state of persistent volumes at a specific point in time. This snapshot-based approach enables near-instant backups with minimal performance impact, making it well suited for cloud-native database workloads that require fast and reliable recovery. + +By integrating OpenEBS VolumeSnapshots with CloudNativePG (CNPG), you can implement efficient, storage-level backups and restore PostgreSQL clusters directly from snapshots. This document explains how to configure the environment, set up snapshot classes, perform VolumeSnapshot-based backups, recover PostgreSQL clusters, and verify restored data in a Kubernetes environment. + +## Environment + +The following versions were used for this workflow: + +| Component | Version | +| :--- | :--- | +| CloudNativePG | v1.25.1 | +| OpenEBS | v4.2.0 | +| Kubernetes | v1.29.6 | +| kubectl-mayastor Plugin | v2.7.4+0 | +| kubectl cnpg plugin | v1.25.1 | + +## Prerequisites + +### Setup OpenEBS + +- **Install OpenEBS** + + Ensure that OpenEBS is installed in your cluster. 
Refer to the [OpenEBS Installation Documentation](../../quickstart-guide/installation.md) for step-by-step instructions.
+
+- **Install the `kubectl-mayastor` Plugin**
+
+  Ensure that the `kubectl-mayastor` plugin is installed. Refer to the [Mayastor Kubectl Plugin Documentation](../../user-guides/replicated-storage-user-guide/replicated-pv-mayastor/advanced-operations/kubectl-plugin.md) to install the plugin.
+
+- **Create a StorageClass**
+
+1. Create a file named `StorageClass.yaml`.
+
+**StorageClass.yaml**
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mayastor-1
+parameters:
+  protocol: nvmf
+  repl: "1"
+  thin: "true" # Must be thin-provisioned
+provisioner: io.openebs.csi-mayastor
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+allowVolumeExpansion: true
+```
+
+2. Apply the configuration.
+
+```
+kubectl create -f StorageClass.yaml
+```
+
+### Create a VolumeSnapshotClass
+
+1. Create a file named `VolumeSnapshotClass.yaml`.
+
+**VolumeSnapshotClass.yaml**
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-mayastor-snapshotclass
+  annotations:
+    snapshot.storage.kubernetes.io/is-default-class: "true"
+driver: io.openebs.csi-mayastor
+deletionPolicy: Delete
+```
+
+2. Apply the configuration.
+
+```
+kubectl create -f VolumeSnapshotClass.yaml
+```
+
+## CloudNativePG Operator and PostgreSQL Cluster Setup
+
+1. Install the CNPG operator using the official manifest.
+
+```
+kubectl apply --server-side -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml
+```
+
+:::note
+By default, the operator is installed in the `cnpg-system` namespace.
+:::
+
+Refer to the [CloudNativePG Installation Documentation](https://cloudnative-pg.io/documentation/1.25/installation_upgrade/) for alternative installation methods.
+
+2. Install the kubectl CNPG plugin. On macOS, you can use Homebrew.
+
+```
+brew install kubectl-cnpg
+```
+
+Refer to the [CloudNativePG kubectl Plugin Documentation](https://cloudnative-pg.io/documentation/1.25/kubectl-plugin/#generation-of-installation-manifests) for installation instructions on Linux, Windows, or other platforms.
+
+## Deploying a PostgreSQL Cluster
+
+1. Create a namespace for the PostgreSQL cluster.
+
+```
+kubectl create namespace cnpg-cluster
+```
+
+2. Create a file named `Cluster.yaml` that defines the PostgreSQL cluster custom resource.
+
+**Cluster.yaml**
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: testcnpg-cluster
+spec:
+  instances: 3
+  primaryUpdateStrategy: unsupervised
+  # Persistent storage configuration
+  storage:
+    storageClass: mayastor-1
+    size: 4Gi
+  walStorage:
+    storageClass: mayastor-1
+    size: 4Gi
+
+  # Backup properties
+  backup:
+    volumeSnapshot:
+      className: csi-mayastor-snapshotclass
+```
+
+3. Create the PostgreSQL cluster.
+
+```
+kubectl create -f Cluster.yaml -n cnpg-cluster
+```
+
+4. Check the cluster status.
+
+```
+kubectl cnpg status testcnpg-cluster -n cnpg-cluster
+```
+
+**Sample Output**
+
+```
+Cluster Summary
+---------------
+Name: cnpg-cluster/testcnpg-cluster
+System ID: 7486770939978866710
+PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.4
+Primary instance: testcnpg-cluster-1
+Primary start time: 2025-03-28 08:15:04 +0000 UTC (uptime 2m28s)
+Status: Cluster in healthy state
+Instances: 3
+Ready instances: 3
+Size: 126M
+Current Write LSN: 0/6050170 (Timeline: 1 - WAL File: 000000010000000000000006)
+
+
+Continuous Backup status
+------------------------
+First Point of Recoverability: Not Available
+Working WAL archiving: OK
+WALs waiting to be archived: 0
+Last Archived WAL: 000000010000000000000005.00000060.backup @ 2025-03-28T08:15:52.520972Z
+Last Failed WAL: -
+
+
+Streaming Replication status
+----------------------------
+Replication Slots Enabled
+
+Name                Sent LSN   Write LSN  Flush LSN  Replay LSN  Write Lag  Flush Lag  Replay Lag  State      Sync State  Sync Priority  Replication Slot
+----                --------   ---------  ---------  ----------  ---------  ---------  ----------  -----      ----------  -------------  ----------------
+testcnpg-cluster-2  0/6050170  0/6050170  0/6050170  0/6050170   00:00:00   00:00:00   00:00:00    streaming  async       0              active
+testcnpg-cluster-3  0/6050170  0/6050170  0/6050170  0/6050170   00:00:00   00:00:00   00:00:00    streaming  async       0              active
+
+
+Instances status
+----------------
+Name                Current LSN  Replication role  Status  QoS         Manager Version  Node
+----                -----------  ----------------  ------  ----------  ---------------  ----
+testcnpg-cluster-1  0/6050170    Primary           OK      BestEffort  1.25.1           node-1-331287
+testcnpg-cluster-2  0/6050170    Standby (async)   OK      BestEffort  1.25.1           node-0-331287
+testcnpg-cluster-3  0/6050170    Standby (async)   OK      BestEffort  1.25.1           node-2-331287
+```
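+
+Before loading data, you can optionally confirm that the cluster's PersistentVolumeClaims were provisioned through the `mayastor-1` StorageClass and that the underlying replicated volumes are online. This is a suggested verification rather than part of the CNPG workflow, and it assumes the `kubectl-mayastor` plugin from the prerequisites is available.
+
+```
+kubectl get pvc -n cnpg-cluster
+kubectl mayastor get volumes
+```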
+
+## Insert Sample Data into the PostgreSQL Database
+
+1. Connect to the PostgreSQL cluster.
+
+```
+kubectl cnpg psql testcnpg-cluster -n cnpg-cluster
+```
+
+2. Create a database and insert sample data.
+
+```
+CREATE DATABASE demo;
+\c demo;
+
+CREATE TABLE my_table (
+    id SERIAL PRIMARY KEY,
+    name VARCHAR(255),
+    value INTEGER,
+    created_at TIMESTAMP DEFAULT NOW()
+);
+
+INSERT INTO my_table (name, value)
+SELECT
+    'Record ' || generate_series(1, 10000),
+    (random() * 1000)::INTEGER;
+
+SELECT COUNT(*) FROM my_table;
+```
+
+**Sample Output**
+
+```
+[demo]$ kubectl cnpg psql testcnpg-cluster -n cnpg-cluster
+psql (17.4 (Debian 17.4-1.pgdg110+2))
+Type "help" for help.
+
+[postgres=#]
+[postgres=#]
+[postgres=#] create database demo;
+CREATE DATABASE
+
+[postgres=#] \l demo
+                               List of databases
+ Name |  Owner   | Encoding | Locale Provider | Collate | Ctype | Locale | ICU Rules | Access privileges
+------+----------+----------+-----------------+---------+-------+--------+-----------+-------------------
+ demo | postgres | UTF8     | libc            | C       | C     |        |           |
+(1 row)
+
+[postgres=#] \c demo
+You are now connected to database "demo" as user "postgres".
+
+[demo=#]
+[demo=#] CREATE TABLE my_ta_
+```
+
+## Backup Using VolumeSnapshots
+
+CloudNativePG supports two snapshot-based backup modes:
+
+ - Online (Hot) Backups: Taken while PostgreSQL is running
+ - Offline (Cold) Backups: Taken while PostgreSQL instances are stopped
+
+- **Perform an Online Backup**
+
+Create an online VolumeSnapshot backup.
+
+```
+kubectl cnpg backup -m volumeSnapshot testcnpg-cluster -n cnpg-cluster
+```
+
+Backup behavior can be controlled using `spec.backup.volumeSnapshot` options such as `online`, `immediateCheckpoint`, and `waitForArchive`, as illustrated in the example below.
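+
+The following `Cluster` excerpt is a minimal sketch of how these options can be combined. It assumes the field layout of the upstream CloudNativePG `volumeSnapshot` API, where `immediateCheckpoint` and `waitForArchive` are nested under `onlineConfiguration`, and the values shown are illustrative; verify them against the CloudNativePG documentation for your operator version.
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: testcnpg-cluster
+spec:
+  # ... other cluster settings ...
+  backup:
+    volumeSnapshot:
+      className: csi-mayastor-snapshotclass
+      # Take the snapshot while PostgreSQL is running (hot backup)
+      online: true
+      onlineConfiguration:
+        # Request an immediate checkpoint instead of waiting for a scheduled one
+        immediateCheckpoint: true
+        # Wait for the WAL required for a consistent restore to be archived
+        waitForArchive: true
+```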
+ +- **Perform an Offline Backup** + +:::warning +Performing a cold backup with volumesnapshots targeting the primary will result in primary instance shutdown and write operation disruption. This also occurs in single-instance clusters, even without explicitly targeting the primary. +::: + +Create an offline VolumeSnapshot backup. + +``` +kubectl cnpg backup -m volumeSnapshot testcnpg-cluster -n cnpg-cluster --online=false +``` + +Check backup status. + +``` +kubectl get backup -n cnpg-cluster +``` + +:::note +For reliable recovery, cold backups are recommended over hot backups. By default, backups are performed on the most suitable replica, or on the primary instance if no replicas are available. This behavior can be modified to explicitly target the primary instance by setting `spec.backup.target="Primary"` in the cluster definition or `spec.target="Primary"` in the Backup custom resource (CRD). +::: + +## Recovery Using VolumeSnapshots + +1. List VolumeSnapshots in the cluster namespace. + +``` +kubectl get volumesnapshot -n cnpg-cluster +``` + +**Sample Output** +``` +NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE +testcnpg-cluster-20250328144442 true testcnpg-cluster-2 - 4Gi csi-mayastor-snapshotclass snapcontent-2e0a7dc1-94b9-475c-89eb-36e0486ca642 7m21s 7m22s +testcnpg-cluster-20250328144442-wal true testcnpg-cluster-2-wal - 4Gi csi-mayastor-snapshotclass snapcontent-70e3efac-b27f-48f3-97c0-e8cb4d788aef 7m21s 7m22s +``` + +2. Create a new cluster using the existing VolumeSnapshots. + +``` +kubectl create -f RecoverCluster.yaml -n cnpg-cluster +``` + +**RecoverCluster.yaml** +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + storage: + size: 4Gi + storageClass: mayastor-1 + walStorage: + size: 4Gi + storageClass: mayastor-1 # Storage Class with thin Prov + primaryUpdateStrategy: unsupervised + bootstrap: + recovery: + volumeSnapshots: + storage: + name: testcnpg-cluster-20250328144442 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + walStorage: + name: testcnpg-cluster-20250328144442-wal + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + backup: + volumeSnapshot: + className: csi-mayastor-snapshotclass +``` + +``` +kubectl get all -n cnpg-cluster +``` + +**Sample Output** +``` +NAME READY STATUS RESTARTS AGE +pod/cluster-restore-1 1/1 Running 0 2m57s +pod/cluster-restore-2 1/1 Running 0 2m23s +pod/cluster-restore-3 1/1 Running 0 114s +pod/testcnpg-cluster-1 1/1 Running 0 74m +pod/testcnpg-cluster-2 1/1 Running 0 73m +pod/testcnpg-cluster-3 1/1 Running 0 73m + + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/cluster-restore-r ClusterIP 10.99.62.103 5432/TCP 3m7s +service/cluster-restore-ro ClusterIP 10.106.226.108 5432/TCP 3m7s +service/cluster-restore-rw ClusterIP 10.106.211.11 5432/TCP 3m7s +service/testcnpg-cluster-r ClusterIP 10.99.226.253 5432/TCP 74m +service/testcnpg-cluster-ro ClusterIP 10.106.164.228 5432/TCP 74m +service/testcnpg-cluster-rw ClusterIP 10.102.32.168 5432/TCP 74m +``` + +:::info +The recovery cluster must be created in the same namespace as the source cluster. +::: + +## Verify Restored Data + +1. Connect to the restored PostgreSQL cluster. + +``` +kubectl cnpg psql cluster-restore -n cnpg-cluster +``` + +2. Verify the restored data. 
+ +``` +\c demo; +SELECT COUNT(*) FROM my_table; +``` + +**Sample Output** +``` +[demo]$ kubectl cnpg psql cluster-restore -n cnpg-cluster +psql (17.4 (Debian 17.4-1.pgdg110+2)) +Type "help" for help. + +[postgres=# \c demo +You are now connected to database "demo" as user "postgres". +[demo=# SELECT COUNT(*) FROM my_table; + count +------- + 10000 +(1 row) + +demo=# +``` + +Successful output confirms that the database was restored correctly from the VolumeSnapshots. + +## See Also + +- [Replicated PV Mayastor Installation on OpenShift](../openebs-on-kubernetes-platforms/openshift.md) +- [Replicated PV Mayastor Installation on Talos](../openebs-on-kubernetes-platforms/talos.md) +- [Kasten Backup and Restore using Replicated PV Mayastor Snapshots - FileSystem](../backup-and-restore/kasten-br-fs.md) +- [Velero Backup and Restore using Replicated PV Mayastor Snapshots - FileSystem](../backup-and-restore/velero-br-fs.md) +- [KubeVirt VM Backup and Restore using Replicated PV Mayastor VolumeSnapshots and Velero - FileSystem](../backup-and-restore/kubevirt-backup.md) \ No newline at end of file diff --git a/docs/main/Solutioning/backup-and-restore/kubevirt-backup.md b/docs/main/Solutioning/backup-and-restore/kubevirt-backup.md index 02cb4ecad..10e349fc0 100644 --- a/docs/main/Solutioning/backup-and-restore/kubevirt-backup.md +++ b/docs/main/Solutioning/backup-and-restore/kubevirt-backup.md @@ -44,7 +44,7 @@ To protect KubeVirt-based VMs, a robust backup strategy is essential. This docum 1. Create a file named `StorageClass.yaml`. **StorageClass.yaml** -``` +```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: @@ -69,7 +69,7 @@ kubectl create -f StorageClass.yaml 1. Create a file named `VolumeSnapshotClass.yaml`. -``` +```yaml apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: @@ -113,7 +113,7 @@ deployment.apps/virt-operator created 2. Create KubeVirt Custom Resource. ``` -kubectl create -f "https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr.yaml " +kubectl create -f "https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr.yaml" ``` **Sample Output** @@ -262,7 +262,7 @@ replicaset.apps/cdi-uploadproxy-856554cb9c 1 1 1 1. Create a file named `dv.yaml`. **dv.yaml** -``` +```yaml apiVersion: cdi.kubevirt.io/v1beta1 kind: DataVolume metadata: @@ -298,7 +298,7 @@ kubectl logs -f pod/importer-fedora 1. Create a file named `vm1_pvc.yaml` to use the PVC prepared by DataVolume as a root disk. **vm1_pvc.yaml** -``` +```yaml apiVersion: kubevirt.io/v1 kind: VirtualMachine metadata: @@ -568,9 +568,132 @@ velero plugin add quay.io/kubevirt/kubevirt-velero-plugin:v0.2.0 velero get plugins | grep kubevirt ``` -## Backup of KubeVirt VM +## Backing Up a KubeVirt VM + +1. Create a Velero backup object that includes the KubeVirt VM, CDI objects, and persistent volumes backed by OpenEBS. + +``` +velero backup create vm1backup1 --snapshot-volumes --include-namespaces=default --volume-snapshot-locations=default --storage-location=default --snapshot-move-data +``` + +2. Check Backup Status. + +``` +velero get backup vm1backup1 +``` + +**Sample Output** + +``` +NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR +vm1backup1 Completed 0 0 2025-05-05 13:36:09 +0530 IST 29d default +``` + +3. After the backup is completed, delete the original VM (`vm1`) in the default namespace to demonstrate a successful restore. 
+ +``` +kubectl delete vm vm1 +``` + +**Sample Output** + +``` +virtualmachine.kubevirt.io "vm1" deleted +``` + +4. Delete the DataVolume. + +``` +kubectl delete datavolumes.cdi.kubevirt.io fedora-1 +``` + +**Sample Output** + +``` +datavolume.cdi.kubevirt.io "fedora-1" deleted +``` + +:::note +When backing up a running virtual machine with the guest agent installed and the KubeVirt-Velero plugin enabled, Velero automatically executes backup hooks. These hooks freeze the guest file systems before the snapshot is taken and unfreeze them afterward, ensuring application-consistent snapshots. If the guest agent is not present, Velero performs a best-effort snapshot. +::: + +## Restoring a KubeVirt VM + +1. Create a new namespace to restore the virtual machine and associated resources. + +``` +kubectl create ns restoredvm +``` + +2. Create a Velero restore object. + +``` +velero restore create vm1restorenew --from-backup vm1backup1 --restore-volumes=true --namespace-mappings default:restoredvm +``` + +3. Check the `datadownload` status. + +``` +kubectl get datadownload -n velero +``` + +**Sample Output** + +``` +NAME STATUS STARTED BYTES DONE TOTAL BYTES STORAGE LOCATION AGE NODE +vm1restorenew-v56ft Completed 71m 8342339584 8584674304 default 71m node-0-347244 +``` + +Once the restore completes, Velero recreates: + + - The KubeVirt virtual machine (vm1) + - The associated DataVolume (fedora) + - All dependent Kubernetes resources + +These resources are restored into the restoredvm namespace. + +## Verification of Restored Data + +After the restore operation completes, verify that both the virtual machine and its data have been successfully recovered. + +1. Connect to the restored virtual machine using the console. +2. Navigate to the root user’s home directory. +3. Verify the presence of the sample data that was created before the backup. + +``` +virtctl console vm1 -n restoredvm +``` + +**Sample Output** +``` +Successfully connected to vm1 console. The escape sequence is ^] + +vm1 login: root +Password: +Last login: Mon May 5 08:03:43 on ttyS0 + +[root@vm1 ~]# ls +sampledata test1 test2 + +[root@vm1 ~]# cat sampledata +This is some sample data + +[root@vm1 ~]# logout + +Fedora Linux 40 (Cloud Edition) +Kernel 6.8.5-301.fc40.x86_64 on an x86_64 (ttyS0) + +eth0: 10.244.1.45 fe80::3ce0:f7ff:fe1e:53c6 +vm1 login: +``` + +This verification confirms that: + - The virtual machine configuration was restored correctly. + - The persistent storage contents were fully preserved. + - The backup and restore workflow functions end-to-end as expected. +Validating restored data is a critical step, as it ensures that the Velero backup accurately captured the VM state along with its underlying persistent volumes. ## See Also diff --git a/docs/sidebars.js b/docs/sidebars.js index aae8b1cea..16fbfdb0a 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -782,7 +782,13 @@ module.exports = { type: "doc", id: "Solutioning/backup-and-restore/kubevirt-backup", label: "KubeVirt VM Backup and Restore" + }, + { + type: "doc", + id: "Solutioning/backup-and-restore/cloudnative-backup", + label: "CloudNativePG Backups" } + ] }, ]