Merge pull request #12919 from simondeziel/misc-backports-stable-5.0
Storage: Misc backports (stable-5.0)
tomponline authored Feb 20, 2024
2 parents 7958df7 + aa3712c commit 09a51b6
Showing 3 changed files with 101 additions and 45 deletions.
26 changes: 16 additions & 10 deletions lxd/storage/drivers/driver_ceph_volumes.go
@@ -1798,23 +1798,29 @@ func (d *ceph) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
 		return err
 	}
 
-	snapVol, err := vol.NewSnapshot(snapshotName)
-	if err != nil {
-		return err
-	}
-
 	// Map the RBD volume.
-	devPath, err := d.rbdMapVolume(snapVol)
+	devPath, err := d.rbdMapVolume(vol)
 	if err != nil {
 		return err
 	}
 
-	defer func() { _ = d.rbdUnmapVolume(snapVol, true) }()
+	defer func() { _ = d.rbdUnmapVolume(vol, true) }()
 
 	// Re-generate the UUID.
-	err = d.generateUUID(snapVol.ConfigBlockFilesystem(), devPath)
-	if err != nil {
-		return err
+	if vol.contentType == ContentTypeFS {
+		err = d.generateUUID(vol.ConfigBlockFilesystem(), devPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// For VM images, restore the filesystem volume too.
+	if vol.IsVMBlock() {
+		fsVol := vol.NewVMBlockFilesystemVolume()
+		err := d.RestoreVolume(fsVol, snapshotName, op)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil
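A restored volume's filesystem keeps the UUID of the volume it was snapshotted from, and filesystems such as XFS refuse to mount while another filesystem with the same UUID is already mounted — hence the `generateUUID` call on the freshly mapped RBD device, now correctly limited to filesystem volumes and extended to the filesystem volume that accompanies a VM's block volume. For context, a minimal sketch of what such a UUID-regeneration helper can look like; the `regenerateFSUUID` name and the exact tool invocations are illustrative assumptions, not the driver's verbatim implementation:

```go
package main

import (
	"fmt"
	"os/exec"
)

// regenerateFSUUID is a hypothetical stand-in for the driver's generateUUID
// helper: it gives the filesystem on devPath a fresh UUID so the kernel no
// longer sees two filesystems with the same identity after a restore.
func regenerateFSUUID(fsType string, devPath string) error {
	var cmd *exec.Cmd

	switch fsType {
	case "ext4":
		// tune2fs wants a clean filesystem; a fuller implementation would
		// run e2fsck first if the volume was not cleanly unmounted.
		cmd = exec.Command("tune2fs", "-U", "random", devPath)
	case "xfs":
		cmd = exec.Command("xfs_admin", "-U", "generate", devPath)
	case "btrfs":
		cmd = exec.Command("btrfstune", "-f", "-u", devPath)
	default:
		return fmt.Errorf("UUID regeneration not supported for %q", fsType)
	}

	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("%v failed: %w (%s)", cmd.Args, err, out)
	}

	return nil
}

func main() {
	// Example: a freshly mapped RBD device of a restored container volume.
	if err := regenerateFSUUID("ext4", "/dev/rbd0"); err != nil {
		fmt.Println(err)
	}
}
```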
101 changes: 67 additions & 34 deletions lxd/storage/drivers/driver_lvm_volumes.go
@@ -1170,76 +1170,109 @@ func (d *lvm) VolumeSnapshots(vol Volume, op *operations.Operation) ([]string, error) {
 
 // RestoreVolume restores a volume from a snapshot.
 func (d *lvm) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
-	// Instantiate snapshot volume from snapshot name.
-	snapVol, err := vol.NewSnapshot(snapshotName)
-	if err != nil {
-		return err
-	}
-
-	revert := revert.New()
-	defer revert.Fail()
+	restoreThinPoolVolume := func(restoreVol Volume) (revert.Hook, error) {
+		// Instantiate snapshot volume from snapshot name.
+		snapVol, err := restoreVol.NewSnapshot(snapshotName)
+		if err != nil {
+			return nil, err
+		}
 
-	// If the pool uses thinpools, then the process for restoring a snapshot is as follows:
-	// 1. Rename the original volume to a temporary name (so we can revert later if needed).
-	// 2. Create a writable snapshot with the original name from the snapshot being restored.
-	// 3. Delete the renamed original volume.
-	if d.usesThinpool() {
-		_, err = d.UnmountVolume(vol, false, op)
+		_, err = d.UnmountVolume(restoreVol, false, op)
 		if err != nil {
-			return fmt.Errorf("Error unmounting LVM logical volume: %w", err)
+			return nil, fmt.Errorf("Error unmounting LVM logical volume: %w", err)
 		}
 
-		originalVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
-		tmpVolName := fmt.Sprintf("%s%s", vol.name, tmpVolSuffix)
-		tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, tmpVolName)
+		originalVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], restoreVol.volType, restoreVol.contentType, restoreVol.name)
+		tmpVolName := fmt.Sprintf("%s%s", restoreVol.name, tmpVolSuffix)
+		tmpVolDevPath := d.lvmDevPath(d.config["lvm.vg_name"], restoreVol.volType, restoreVol.contentType, tmpVolName)
 
+		reverter := revert.New()
+		defer reverter.Fail()
+
 		// Rename original logical volume to temporary new name so we can revert if needed.
 		err = d.renameLogicalVolume(originalVolDevPath, tmpVolDevPath)
 		if err != nil {
-			return fmt.Errorf("Error temporarily renaming original LVM logical volume: %w", err)
+			return nil, fmt.Errorf("Error temporarily renaming original LVM logical volume: %w", err)
 		}
 
-		revert.Add(func() {
+		reverter.Add(func() {
 			// Rename the original volume back to the original name.
 			_ = d.renameLogicalVolume(tmpVolDevPath, originalVolDevPath)
 		})
 
 		// Create writable snapshot from source snapshot named as target volume.
-		_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], snapVol, vol, false, true)
+		_, err = d.createLogicalVolumeSnapshot(d.config["lvm.vg_name"], snapVol, restoreVol, false, true)
 		if err != nil {
-			return fmt.Errorf("Error restoring LVM logical volume snapshot: %w", err)
+			return nil, fmt.Errorf("Error restoring LVM logical volume snapshot: %w", err)
 		}
 
-		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, vol.name)
+		volDevPath := d.lvmDevPath(d.config["lvm.vg_name"], restoreVol.volType, restoreVol.contentType, restoreVol.name)
 
-		revert.Add(func() {
+		reverter.Add(func() {
 			_ = d.removeLogicalVolume(volDevPath)
 		})
 
 		// If the volume's filesystem needs to have its UUID regenerated to allow mount then do so now.
-		if vol.contentType == ContentTypeFS && renegerateFilesystemUUIDNeeded(vol.ConfigBlockFilesystem()) {
-			_, err = d.activateVolume(vol)
+		if restoreVol.contentType == ContentTypeFS && renegerateFilesystemUUIDNeeded(restoreVol.ConfigBlockFilesystem()) {
+			_, err = d.activateVolume(restoreVol)
 			if err != nil {
-				return err
+				return nil, err
			}
 
-			d.logger.Debug("Regenerating filesystem UUID", logger.Ctx{"dev": volDevPath, "fs": vol.ConfigBlockFilesystem()})
-			err = regenerateFilesystemUUID(vol.ConfigBlockFilesystem(), volDevPath)
+			d.logger.Debug("Regenerating filesystem UUID", logger.Ctx{"dev": volDevPath, "fs": restoreVol.ConfigBlockFilesystem()})
+			err = regenerateFilesystemUUID(restoreVol.ConfigBlockFilesystem(), volDevPath)
 			if err != nil {
-				return err
+				return nil, err
 			}
 		}
 
 		// Finally remove the original logical volume. Should always be the last step to allow revert.
-		err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], vol.volType, vol.contentType, tmpVolName))
+		err = d.removeLogicalVolume(d.lvmDevPath(d.config["lvm.vg_name"], restoreVol.volType, restoreVol.contentType, tmpVolName))
 		if err != nil {
-			return fmt.Errorf("Error removing original LVM logical volume: %w", err)
+			return nil, fmt.Errorf("Error removing original LVM logical volume: %w", err)
 		}
 
-		revert.Success()
+		cleanup := reverter.Clone().Fail
+		reverter.Success()
+		return cleanup, nil
+	}
+
+	reverter := revert.New()
+	defer reverter.Fail()
+
+	// If the pool uses thinpools, then the process for restoring a snapshot is as follows:
+	// 1. Rename the original volume to a temporary name (so we can revert later if needed).
+	// 2. Create a writable snapshot with the original name from the snapshot being restored.
+	// 3. Delete the renamed original volume.
+	if d.usesThinpool() {
+		cleanup, err := restoreThinPoolVolume(vol)
+		if err != nil {
+			return err
+		}
+
+		reverter.Add(cleanup)
+
+		// For VMs, restore the filesystem volume.
+		if vol.IsVMBlock() {
+			fsVol := vol.NewVMBlockFilesystemVolume()
+			cleanup, err := restoreThinPoolVolume(fsVol)
+			if err != nil {
+				return err
+			}
+
+			reverter.Add(cleanup)
+		}
+
+		reverter.Success()
 		return nil
 	}
 
+	// Instantiate snapshot volume from snapshot name.
+	snapVol, err := vol.NewSnapshot(snapshotName)
+	if err != nil {
+		return err
+	}
+
 	// If the pool uses classic logical volumes, then the process for restoring a snapshot is as follows:
 	// 1. Ensure snapshot volumes have sufficient CoW capacity to allow restoration.
 	// 2. Mount source and target.
@@ -1328,7 +1361,7 @@ func (d *lvm) RestoreVolume(vol Volume, snapshotName string, op *operations.Operation) error {
 		return fmt.Errorf("Error restoring LVM logical volume snapshot: %w", err)
 	}
 
-	revert.Success()
+	reverter.Success()
 	return nil
 }
 
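The interesting part of this refactor is the hook plumbing: `restoreThinPoolVolume` returns a `revert.Hook` produced by `reverter.Clone().Fail`. Cloning keeps the registered undo hooks alive past the local `reverter.Success()`, so the caller can re-arm them on its own reverter — if restoring the VM's filesystem volume fails after the block volume was already restored, the outer reverter rolls both back. A self-contained sketch of those semantics (a toy re-implementation for illustration, not the actual `lxd/revert` package):

```go
package main

import "fmt"

// Hook mirrors revert.Hook: an undo step registered while an operation
// makes progress.
type Hook func()

// Reverter is a toy version of the pattern: hooks run in reverse order on
// Fail() unless Success() disarms them first.
type Reverter struct {
	hooks   []Hook
	success bool
}

func New() *Reverter { return &Reverter{} }

// Add registers an undo hook.
func (r *Reverter) Add(h Hook) { r.hooks = append(r.hooks, h) }

// Fail runs the undo hooks in reverse registration order, unless disarmed.
func (r *Reverter) Fail() {
	if r.success {
		return
	}

	for i := len(r.hooks) - 1; i >= 0; i-- {
		r.hooks[i]()
	}
}

// Success disarms the reverter so a deferred Fail becomes a no-op.
func (r *Reverter) Success() { r.success = true }

// Clone copies the hook list. The clone stays armed after the original is
// disarmed, which is what `cleanup := reverter.Clone().Fail` relies on.
func (r *Reverter) Clone() *Reverter {
	c := New()
	c.hooks = append(c.hooks, r.hooks...)
	return c
}

func main() {
	outer := New()
	defer outer.Fail()

	restore := func(name string) (Hook, error) {
		r := New()
		defer r.Fail()

		// ... restore work happens here, registering undo steps ...
		r.Add(func() { fmt.Println("undoing restore of", name) })

		cleanup := r.Clone().Fail // keep the undo hooks alive for the caller
		r.Success()               // stop our own deferred Fail from firing
		return cleanup, nil
	}

	cleanup, err := restore("block volume")
	if err != nil {
		return
	}

	outer.Add(cleanup)

	// Had a second restore failed here, outer.Fail() would also undo the
	// first one. Everything worked, so disarm instead.
	outer.Success()
}
```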
19 changes: 18 additions & 1 deletion test/suites/storage.sh
@@ -66,8 +66,25 @@ test_storage() {
     fi
     lxc delete --force uuid1
     lxc delete --force uuid2
-    lxc image delete testimage
 
+    # Test UUID re-generation in case of restore.
+    lxc init testimage uuid1 -s "${POOL}"
+    lxc snapshot uuid1
+    lxc start uuid1
+    if [ "$lxd_backend" = "lvm" ]; then
+      uuid="$(blkid -s UUID -o value -p /dev/"${POOL}"/containers_uuid1)"
+    elif [ "$lxd_backend" = "ceph" ]; then
+      uuid="$(blkid -s UUID -o value -p /dev/rbd/"${POOL}"/container_uuid1)"
+    fi
+    lxc restore uuid1 snap0
+    if [ "$lxd_backend" = "lvm" ]; then
+      [ "$(blkid -s UUID -o value -p /dev/"${POOL}"/containers_uuid1)" != "$uuid" ]
+    elif [ "$lxd_backend" = "ceph" ]; then
+      [ "$(blkid -s UUID -o value -p /dev/rbd/"${POOL}"/container_uuid1)" != "$uuid" ]
+    fi
+    lxc delete --force uuid1
+
+    lxc image delete testimage
     lxc storage delete "$btrfs_storage_pool"
   fi
   ensure_import_testimage
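The test captures the filesystem UUID with `blkid -s UUID -o value -p` (low-level probing, bypassing the blkid cache) before the restore and asserts it differs afterwards. The same check is easy to script outside the suite; a sketch in Go, where the device path is a hypothetical LVM-backed container volume and `blkid` is assumed to be on the PATH:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// fsUUID reads the filesystem UUID straight from the block device,
// mirroring the shell check above.
func fsUUID(devPath string) (string, error) {
	out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", "-p", devPath).Output()
	if err != nil {
		return "", fmt.Errorf("blkid %s: %w", devPath, err)
	}

	return strings.TrimSpace(string(out)), nil
}

func main() {
	// Hypothetical device path for an LVM-backed container volume.
	dev := "/dev/lxdtest/containers_uuid1"

	before, err := fsUUID(dev)
	if err != nil {
		panic(err)
	}

	// ... `lxc restore uuid1 snap0` would run here ...

	after, err := fsUUID(dev)
	if err != nil {
		panic(err)
	}

	if after == before {
		panic("filesystem UUID was not regenerated on restore")
	}

	fmt.Println("UUID changed:", before, "->", after)
}
```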
