From 8b0c8b6d255aa5323bb3205d7e3c9becfa4d9d1b Mon Sep 17 00:00:00 2001 From: "dmitry.lopatin" Date: Fri, 15 Nov 2024 23:36:16 +0300 Subject: [PATCH] fix(vd): add new handler Signed-off-by: dmitry.lopatin --- .../cvi/internal/source/object_ref_vd.go | 6 +- .../pkg/controller/vd/internal/inuse.go | 180 +++++++ .../pkg/controller/vd/internal/inuse_test.go | 492 ++++++++++++++++++ .../pkg/controller/vd/vd_controller.go | 1 + .../vi/internal/source/object_ref_vd.go | 6 +- .../controller/vm/internal/block_device.go | 32 +- .../vm/internal/block_devices_test.go | 79 +++ .../pkg/controller/watchers/cvi_filter.go | 3 +- .../pkg/controller/watchers/vi_filter.go | 3 +- 9 files changed, 783 insertions(+), 19 deletions(-) create mode 100644 images/virtualization-artifact/pkg/controller/vd/internal/inuse.go create mode 100644 images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go index 68f7e726a..f5a2552b3 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go @@ -226,10 +226,8 @@ func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *virtv2.Cluster } inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status == metav1.ConditionTrue { - if inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { - return nil - } + if inUseCondition.Status == metav1.ConditionTrue && inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { + return nil } return NewVirtualDiskNotAllowedForUseError(vd.Name) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go new file mode 100644 index 
000000000..03f49e0d3 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go @@ -0,0 +1,180 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "slices" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + virtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + "github.com/deckhouse/virtualization-controller/pkg/sdk/framework/helper" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +type InUseHandler struct { + client client.Client +} + +func NewInUseHandler(client client.Client) *InUseHandler { + return &InUseHandler{ + client: client, + } +} + +func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { + inUseCondition, ok := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + if !ok { + cb := conditions.NewConditionBuilder(vdcondition.InUseType). + Status(metav1.ConditionUnknown). + Reason(conditions.ReasonUnknown). 
+ Generation(vd.Generation) + conditions.SetCondition(cb, &vd.Status.Conditions) + inUseCondition = cb.Condition() + } + + allowUseForVM, allowUseForImage := false, false + + var vms virtv2.VirtualMachineList + err := h.client.List(ctx, &vms, &client.ListOptions{ + Namespace: vd.GetNamespace(), + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error getting virtual machines: %w", err) + } + + for _, vm := range vms.Items { + if h.isVDAttachedToVM(vd.GetName(), vm) { + if vm.Status.Phase != virtv2.MachineStopped { + allowUseForVM = isVMReady(vm.Status.Conditions) + + if allowUseForVM { + break + } + } else { + kvvm, err := helper.FetchObject(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}, h.client, &virtv1.VirtualMachine{}) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error getting kvvms: %w", err) + } + + if kvvm != nil && kvvm.Status.StateChangeRequests != nil { + allowUseForVM = true + break + } + } + } + } + + var vis virtv2.VirtualImageList + err = h.client.List(ctx, &vis, &client.ListOptions{ + Namespace: vd.GetNamespace(), + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error getting virtual images: %w", err) + } + + allowedPhases := []virtv2.ImagePhase{virtv2.ImageProvisioning, virtv2.ImagePending} + + for _, vi := range vis.Items { + if slices.Contains(allowedPhases, vi.Status.Phase) && vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && vi.Spec.DataSource.ObjectRef.Name == vd.GetName() { + allowUseForImage = true + break + } + } + + var cvis virtv2.ClusterVirtualImageList + err = h.client.List(ctx, &cvis, &client.ListOptions{}) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error getting cluster virtual images: %w", err) + } + for _, cvi := range cvis.Items { + if slices.Contains(allowedPhases, cvi.Status.Phase) && cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Name == vd.GetName() && cvi.Spec.DataSource.ObjectRef.Namespace == vd.GetNamespace() && 
cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind { + allowUseForImage = true + } + } + + cb := conditions.NewConditionBuilder(vdcondition.InUseType) + + switch { + case allowUseForVM && inUseCondition.Status == metav1.ConditionUnknown: + if inUseCondition.Reason != vdcondition.AllowedForVirtualMachineUsage.String() { + cb. + Generation(vd.Generation). + Status(metav1.ConditionTrue). + Reason(vdcondition.AllowedForVirtualMachineUsage). + Message("") + conditions.SetCondition(cb, &vd.Status.Conditions) + } + case allowUseForImage && inUseCondition.Status == metav1.ConditionUnknown: + if inUseCondition.Reason != vdcondition.AllowedForImageUsage.String() { + cb. + Generation(vd.Generation). + Status(metav1.ConditionTrue). + Reason(vdcondition.AllowedForImageUsage). + Message("") + conditions.SetCondition(cb, &vd.Status.Conditions) + } + default: + if inUseCondition.Reason == vdcondition.AllowedForVirtualMachineUsage.String() && !allowUseForVM || inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() && !allowUseForImage { + cb. + Generation(vd.Generation). + Status(metav1.ConditionUnknown). + Reason(conditions.ReasonUnknown). 
+ Message("") + conditions.SetCondition(cb, &vd.Status.Conditions) + } + + if allowUseForImage || allowUseForVM { + return reconcile.Result{Requeue: true}, nil + } + } + + return reconcile.Result{}, nil +} + +func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) bool { + for _, bda := range vm.Status.BlockDeviceRefs { + if bda.Kind == virtv2.DiskDevice && bda.Name == vdName { + return true + } + } + + return false +} + +func isVMReady(conditions []metav1.Condition) bool { + critConditions := []string{ + vmcondition.TypeIPAddressReady.String(), + vmcondition.TypeClassReady.String(), + } + + for _, c := range conditions { + if slices.Contains(critConditions, c.Type) && c.Status == metav1.ConditionFalse { + return false + } + } + + return true +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go new file mode 100644 index 000000000..98d102532 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go @@ -0,0 +1,492 @@ +/* +Copyright 2024 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + virtv1 "kubevirt.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" + virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" +) + +var _ = ginkgo.Describe("InUseHandler", func() { + var ( + scheme *runtime.Scheme + ctx context.Context + handler *InUseHandler + ) + + ginkgo.BeforeEach(func() { + scheme = runtime.NewScheme() + gomega.Expect(clientgoscheme.AddToScheme(scheme)).To(gomega.Succeed()) + gomega.Expect(virtv2.AddToScheme(scheme)).To(gomega.Succeed()) + gomega.Expect(virtv1.AddToScheme(scheme)).To(gomega.Succeed()) + + ctx = context.TODO() + }) + + ginkgo.Context("when VirtualDisk is not in use", func() { + ginkgo.It("must set status Unknown and reason Unknown", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + }) + }) + + ginkgo.Context("when VirtualDisk is used by running VirtualMachine", func() { + ginkgo.It("must set status 
True and reason AllowedForVirtualMachineUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + vm := &virtv2.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + }, + Spec: virtv2.VirtualMachineSpec{ + BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + { + Kind: virtv2.DiskDevice, + Name: vd.Name, + }, + }, + }, + Status: virtv2.VirtualMachineStatus{ + Phase: virtv2.MachineRunning, + BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + { + Kind: virtv2.DiskDevice, + Name: vd.Name, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vm).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + }) + }) + + ginkgo.Context("when VirtualDisk is used by not ready VirtualMachine", func() { + ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + vm := &virtv2.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + }, + Status: virtv2.VirtualMachineStatus{ + Conditions: []metav1.Condition{ + { + Type: vmcondition.TypeMigrating.String(), + Status: metav1.ConditionFalse, + }, + { + Type: vmcondition.TypeIPAddressReady.String(), + Status: metav1.ConditionFalse, + 
}, + }, + BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + { + Kind: virtv2.DiskDevice, + Name: vd.Name, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vm).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + }) + }) + + ginkgo.Context("when VirtualDisk is used by VirtualImage", func() { + ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + vi := &virtv2.VirtualImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vi", + Namespace: "default", + }, + Spec: virtv2.VirtualImageSpec{ + DataSource: virtv2.VirtualImageDataSource{ + Type: virtv2.DataSourceTypeObjectRef, + ObjectRef: &virtv2.VirtualImageObjectRef{ + Kind: virtv2.VirtualDiskKind, + Name: "test-vd", + }, + }, + }, + Status: virtv2.VirtualImageStatus{ + Phase: virtv2.ImageProvisioning, + Conditions: []metav1.Condition{}, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vi).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + }) + }) + + 
ginkgo.Context("when VirtualDisk is used by ClusterVirtualImage", func() { + ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + cvi := &virtv2.ClusterVirtualImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vi", + Namespace: "default", + }, + Spec: virtv2.ClusterVirtualImageSpec{ + DataSource: virtv2.ClusterVirtualImageDataSource{ + Type: virtv2.DataSourceTypeObjectRef, + ObjectRef: &virtv2.ClusterVirtualImageObjectRef{ + Kind: virtv2.VirtualDiskKind, + Name: "test-vd", + Namespace: "default", + }, + }, + }, + Status: virtv2.ClusterVirtualImageStatus{ + Phase: virtv2.ImageProvisioning, + Conditions: []metav1.Condition{}, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, cvi).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + }) + }) + + ginkgo.Context("when VirtualDisk is used by VirtualImage and VirtualMachine", func() { + ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{}, + }, + } + + vi := &virtv2.VirtualImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vi", + Namespace: "default", + }, + Spec: virtv2.VirtualImageSpec{ + DataSource: virtv2.VirtualImageDataSource{ + Type: 
virtv2.DataSourceTypeObjectRef, + ObjectRef: &virtv2.VirtualImageObjectRef{ + Kind: virtv2.VirtualDiskKind, + Name: "test-vd", + }, + }, + }, + Status: virtv2.VirtualImageStatus{ + Phase: virtv2.ImageProvisioning, + Conditions: []metav1.Condition{}, + }, + } + + vm := &virtv2.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + }, + Status: virtv2.VirtualMachineStatus{ + BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + { + Kind: virtv2.DiskDevice, + Name: vd.Name, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vi, vm).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + }) + }) + + ginkgo.Context("when VirtualDisk is used by VirtualMachine after create image", func() { + ginkgo.It("must set status True and reason AllowedForVirtualMachineUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: conditions.ReasonUnknown.String(), + Status: metav1.ConditionUnknown, + }, + }, + }, + } + + vm := &virtv2.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "default", + }, + Status: virtv2.VirtualMachineStatus{ + BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + { + Kind: virtv2.DiskDevice, + Name: vd.Name, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vm).Build() + handler = 
NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForVirtualMachineUsage.String())) + }) + }) + + ginkgo.Context("when VirtualDisk is used by VirtualImage after running VM", func() { + ginkgo.It("must set status True and reason AllowedForImageUsage", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: conditions.ReasonUnknown.String(), + Status: metav1.ConditionUnknown, + }, + }, + }, + } + + vi := &virtv2.VirtualImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vi", + Namespace: "default", + }, + Spec: virtv2.VirtualImageSpec{ + DataSource: virtv2.VirtualImageDataSource{ + Type: virtv2.DataSourceTypeObjectRef, + ObjectRef: &virtv2.VirtualImageObjectRef{ + Kind: virtv2.VirtualDiskKind, + Name: "test-vd", + }, + }, + }, + Status: virtv2.VirtualImageStatus{ + Phase: virtv2.ImageProvisioning, + Conditions: []metav1.Condition{}, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd, vi).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionTrue)) + gomega.Expect(cond.Reason).To(gomega.Equal(vdcondition.AllowedForImageUsage.String())) + }) + }) + 
+ ginkgo.Context("when VirtualDisk is not in use after create image", func() { + ginkgo.It("must set status Unknown and reason Unknown", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForImageUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + }) + }) + + ginkgo.Context("when VirtualDisk is not in use after running VM", func() { + ginkgo.It("must set status Unknown and reason Unknown", func() { + vd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vd", + Namespace: "default", + }, + Status: virtv2.VirtualDiskStatus{ + Conditions: []metav1.Condition{ + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForVirtualMachineUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vd).Build() + handler = NewInUseHandler(k8sClient) + + result, err := handler.Handle(ctx, vd) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(result).To(gomega.Equal(ctrl.Result{})) + + cond, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + gomega.Expect(cond).ToNot(gomega.BeNil()) + gomega.Expect(cond.Status).To(gomega.Equal(metav1.ConditionUnknown)) + }) + }) +}) diff --git 
a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go index ab1a093a4..b9cc60d90 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go @@ -84,6 +84,7 @@ func NewController( internal.NewDeletionHandler(sources), internal.NewAttacheeHandler(mgr.GetClient()), internal.NewStatsHandler(stat, importer, uploader), + internal.NewInUseHandler(mgr.GetClient()), ) vdController, err := controller.New(ControllerName, mgr, controller.Options{ diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go index a7cc0b738..c11aee59f 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go @@ -379,10 +379,8 @@ func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *virtv2.VirtualI } inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status == metav1.ConditionTrue { - if inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { - return nil - } + if inUseCondition.Status == metav1.ConditionTrue && inUseCondition.Reason == vdcondition.AllowedForImageUsage.String() { + return nil } return NewVirtualDiskNotAllowedForUseError(vd.Name) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go index 755dd174d..ef79719d4 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device.go @@ -164,6 +164,19 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS return 
reconcile.Result{RequeueAfter: 60 * time.Second}, nil } + vds, err := s.VirtualDisksByName(ctx) + if err != nil { + return reconcile.Result{}, err + } + + if !h.checkAllowVirtualDisks(vds) { + mgr.Update(cb.Status(metav1.ConditionFalse). + Reason(vmcondition.ReasonBlockDevicesNotReady). + Message("virtual disks are not allowed for use in the virtual machine; they may be used to create images").Condition()) + changed.Status.Conditions = mgr.Generate() + return reconcile.Result{}, nil + } + mgr.Update(cb.Status(metav1.ConditionTrue). Reason(vmcondition.ReasonBlockDevicesReady). Condition()) @@ -171,6 +184,17 @@ func (h *BlockDeviceHandler) Handle(ctx context.Context, s state.VirtualMachineS return reconcile.Result{}, nil } +func (h *BlockDeviceHandler) checkAllowVirtualDisks(vds map[string]*virtv2.VirtualDisk) bool { + for _, vd := range vds { + inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) + if inUseCondition.Status != metav1.ConditionTrue || inUseCondition.Reason != vdcondition.AllowedForVirtualMachineUsage.String() { + return false + } + } + + return true +} + func (h *BlockDeviceHandler) Name() string { return nameBlockDeviceHandler } @@ -302,13 +326,7 @@ func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s } readyCondition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) if readyCondition.Status == metav1.ConditionTrue { - inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) - if inUseCondition.Status == metav1.ConditionTrue && inUseCondition.Reason == vdcondition.AllowedForVirtualMachineUsage.String() { - ready++ - } else { - msg := fmt.Sprintf("virtual disk %s is not allowed for use in the virtual machine, it may be used to create an image", vd.Name) - warnings = append(warnings, msg) - } + ready++ } else { msg := fmt.Sprintf("virtual disk %s is waiting for the it's pvc to be bound", vd.Name) warnings = append(warnings, msg) 
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go index ec2e3480b..f8144d245 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_devices_test.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -133,6 +134,84 @@ var _ = Describe("BlockDeviceHandler", func() { }) }) + Context("BlockDevices are not ready, because VirtualDisk is not allowed", func() { + It("return false", func() { + anyVd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, + Status: virtv2.VirtualDiskStatus{ + Phase: virtv2.DiskReady, + Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, + Conditions: []metav1.Condition{ + { + Type: vdcondition.ReadyType.String(), + Reason: vdcondition.Ready.String(), + Status: metav1.ConditionTrue, + }, + { + Type: vdcondition.InUseType.String(), + Reason: conditions.ReasonUnknown.String(), + Status: metav1.ConditionUnknown, + }, + }, + }, + } + + vds := map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + anyVd.Name: anyVd, + } + + allowed := h.checkAllowVirtualDisks(vds) + Expect(allowed).To(BeFalse()) + }) + }) + + Context("BlockDevices are not ready, because VirtualDisk using for create image", func() { + It("return false", func() { + anyVd := &virtv2.VirtualDisk{ + ObjectMeta: metav1.ObjectMeta{Name: "anyVd"}, + Status: virtv2.VirtualDiskStatus{ + Phase: virtv2.DiskReady, + Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, + Conditions: []metav1.Condition{ + { + Type: vdcondition.ReadyType.String(), + 
Reason: vdcondition.Ready.String(), + Status: metav1.ConditionTrue, + }, + { + Type: vdcondition.InUseType.String(), + Reason: vdcondition.AllowedForImageUsage.String(), + Status: metav1.ConditionTrue, + }, + }, + }, + } + + vds := map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + anyVd.Name: anyVd, + } + + allowed := h.checkAllowVirtualDisks(vds) + Expect(allowed).To(BeFalse()) + }) + }) + + Context("BlockDevices are ready, because VirtualDisk is allowed", func() { + It("return true", func() { + vds := map[string]*virtv2.VirtualDisk{ + vdFoo.Name: vdFoo, + vdBar.Name: vdBar, + } + + allowed := h.checkAllowVirtualDisks(vds) + Expect(allowed).To(BeTrue()) + }) + }) + Context("Image is not ready", func() { It("VirtualImage not ready: cannot start, no warnings", func() { vi.Status.Phase = virtv2.ImagePending diff --git a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go index 4b3f46125..5cc4d2ef3 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go +++ b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go @@ -52,6 +52,5 @@ func (f ClusterVirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool return false } - // Triggered only if the resource phase changed to Ready. 
- return oldCVI.Status.Phase != newCVI.Status.Phase && newCVI.Status.Phase == virtv2.ImageReady + return oldCVI.Status.Phase != newCVI.Status.Phase } diff --git a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go index 04895fcd8..cccde70ba 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go +++ b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go @@ -52,6 +52,5 @@ func (f VirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool { return false } - // Triggered only if the resource phase changed to Ready. - return oldVI.Status.Phase != newVI.Status.Phase && newVI.Status.Phase == virtv2.ImageReady + return oldVI.Status.Phase != newVI.Status.Phase }