From 1477278487c23991ffb23edcd21298947592bd83 Mon Sep 17 00:00:00 2001
From: Chris Chiu
Date: Mon, 25 Nov 2024 19:37:05 +0800
Subject: [PATCH] Use VMCache instead of VMICache to judge if the NAD is in use

When a VM is turned off from the GUI its VMI is removed, so the VMI
information alone can't be relied on to determine whether a particular
NAD is attached to any VM. Consult the VMCache as well so that a NAD
which is still referenced by a powered-off VM cannot be deleted.

Signed-off-by: Chris Chiu
---
 cmd/webhook/main.go                 |  8 +++++-
 pkg/utils/vmi.go                    | 42 ++++++++++++++++++++++++++---
 pkg/webhook/nad/validator.go        | 11 +++++---
 pkg/webhook/vlanconfig/validator.go | 27 +++++--------------
 4 files changed, 59 insertions(+), 29 deletions(-)

diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index 94de69de5..1d170b0ad 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -118,7 +118,7 @@ func run(ctx context.Context, cfg *rest.Config, options *config.Options) error {
 	if err := webhookServer.RegisterValidators(
 		clusternetwork.NewCnValidator(c.vcCache),
-		nad.NewNadValidator(c.vmiCache),
+		nad.NewNadValidator(c.vmiCache, c.vmCache),
 		vlanconfig.NewVlanConfigValidator(c.nadCache, c.vcCache, c.vsCache, c.vmiCache),
 	); err != nil {
 		return fmt.Errorf("failed to register validators: %v", err)
 	}
@@ -135,6 +135,7 @@ func run(ctx context.Context, cfg *rest.Config, options *config.Options) error {
 
 type caches struct {
 	nadCache ctlcniv1.NetworkAttachmentDefinitionCache
+	vmCache  ctlkubevirtv1.VirtualMachineCache
 	vmiCache ctlkubevirtv1.VirtualMachineInstanceCache
 	vcCache  ctlnetworkv1.VlanConfigCache
 	vsCache  ctlnetworkv1.VlanStatusCache
@@ -155,6 +156,7 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches,
 	starters = append(starters, coreFactory)
 	// must declare cache before starting informers
 	c := &caches{
+		vmCache:  kubevirtFactory.Kubevirt().V1().VirtualMachine().Cache(),
 		vmiCache: kubevirtFactory.Kubevirt().V1().VirtualMachineInstance().Cache(),
 		nadCache: cniFactory.K8s().V1().NetworkAttachmentDefinition().Cache(),
 		vcCache:  harvesterNetworkFactory.Network().V1beta1().VlanConfig().Cache(),
@@ -164,6 +166,7 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches,
 	}
 	// Indexer must be added before starting the informer, otherwise panic `cannot add indexers to running index` happens
 	c.vmiCache.AddIndexer(indexeres.VMByNetworkIndex, vmiByNetwork)
+	c.vmCache.AddIndexer(indexeres.VMByNetworkIndex, indexeres.VMByNetwork)
 
 	if err := start.All(ctx, threadiness, starters...); err != nil {
 		return nil, err
@@ -192,3 +195,6 @@ func vmiByNetwork(obj *kubevirtv1.VirtualMachineInstance) ([]string, error) {
 	}
 	return networkNameList, nil
 }
+
+// VMByNetwork is already defined in
+// ./vendor/github.com/harvester/harvester/pkg/indexeres/indexer.go
diff --git a/pkg/utils/vmi.go b/pkg/utils/vmi.go
index 3595b439e..797fa9e2d 100644
--- a/pkg/utils/vmi.go
+++ b/pkg/utils/vmi.go
@@ -12,10 +12,11 @@ import (
 
 type VmiGetter struct {
 	VmiCache ctlkubevirtv1.VirtualMachineInstanceCache
+	VMCache  ctlkubevirtv1.VirtualMachineCache
 }
 
 // WhoUseNad requires adding network indexer to the vmi cache before invoking it
-func (v *VmiGetter) WhoUseNad(nad *nadv1.NetworkAttachmentDefinition, nodesFilter mapset.Set[string]) ([]*kubevirtv1.VirtualMachineInstance, error) {
+func (v *VmiGetter) WhoUseNad(nad *nadv1.NetworkAttachmentDefinition, nodesFilter mapset.Set[string]) ([]string, error) {
 	// multus network name can be <network name> or <namespace>/<network name>
 	// ref: https://github.com/kubevirt/client-go/blob/148fa0d1c7e83b7a56606a7ca92394ba6768c9ac/api/v1/schema.go#L1436-L1439
 	networkName := fmt.Sprintf("%s/%s", nad.Namespace, nad.Name)
@@ -36,7 +37,11 @@ func (v *VmiGetter) WhoUseNad(nad *nadv1.NetworkAttachmentDefinition, nodesFilte
 	}
 
 	if nodesFilter == nil || nodesFilter.Cardinality() == 0 {
-		return vmis, nil
+		vmiNamePreList := make([]string, 0, len(vmis))
+		for _, vmi := range vmis {
+			vmiNamePreList = append(vmiNamePreList, vmi.Namespace+"/"+vmi.Name)
+		}
+		return vmiNamePreList, nil
 	}
 
 	afterFilter := make([]*kubevirtv1.VirtualMachineInstance, 0, len(vmis))
@@ -47,5 +52,43 @@ func (v *VmiGetter) WhoUseNad(nad *nadv1.NetworkAttachmentDefinition, nodesFilte
 		}
 	}
 
-	return afterFilter, nil
+	vmiNameList := make([]string, 0)
+	if len(afterFilter) > 0 {
+		for _, vmi := range afterFilter {
+			vmiNameList = append(vmiNameList, vmi.Namespace+"/"+vmi.Name)
+		}
+	}
+
+	// a stopped VM has no VMI, so also collect the VMs whose spec still references this nad.
+	// Callers that don't provide a VMCache keep the previous VMI-only behaviour.
+	if v.VMCache == nil {
+		return vmiNameList, nil
+	}
+
+	vms, err := v.VMCache.GetByIndex(indexeres.VMByNetworkIndex, networkName)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, vm := range vms {
+		if vm.Namespace != nad.Namespace {
+			continue
+		}
+		vmiNameList = append(vmiNameList, vm.Namespace+"/"+vm.Name)
+	}
+
+	// the nad may also be referenced by its bare name, without the namespace prefix
+	vmsTmp, err := v.VMCache.GetByIndex(indexeres.VMByNetworkIndex, nad.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, vm := range vmsTmp {
+		if vm.Namespace != nad.Namespace {
+			continue
+		}
+		vmiNameList = append(vmiNameList, vm.Namespace+"/"+vm.Name)
+	}
+
+	return vmiNameList, nil
 }
diff --git a/pkg/webhook/nad/validator.go b/pkg/webhook/nad/validator.go
index ce92c14a7..cf2a3c9ca 100644
--- a/pkg/webhook/nad/validator.go
+++ b/pkg/webhook/nad/validator.go
@@ -25,13 +25,16 @@ const (
 
 type Validator struct {
 	admission.DefaultValidator
 	vmiCache ctlkubevirtv1.VirtualMachineInstanceCache
+	vmCache  ctlkubevirtv1.VirtualMachineCache
 }
 
 var _ admission.Validator = &Validator{}
 
-func NewNadValidator(vmiCache ctlkubevirtv1.VirtualMachineInstanceCache) *Validator {
+func NewNadValidator(vmiCache ctlkubevirtv1.VirtualMachineInstanceCache,
+	vmCache ctlkubevirtv1.VirtualMachineCache) *Validator {
 	return &Validator{
 		vmiCache: vmiCache,
+		vmCache:  vmCache,
 	}
 }
@@ -126,7 +129,7 @@ func (v *Validator) checkRoute(config string) error {
 }
 
 func (v *Validator) checkVmi(nad *cniv1.NetworkAttachmentDefinition) error {
-	vmiGetter := utils.VmiGetter{VmiCache: v.vmiCache}
+	vmiGetter := utils.VmiGetter{VmiCache: v.vmiCache, VMCache: v.vmCache}
 	vmis, err := vmiGetter.WhoUseNad(nad, nil)
 	if err != nil {
 		return err
@@ -135,9 +138,9 @@ func (v *Validator) checkVmi(nad *cniv1.NetworkAttachmentDefinition) error {
 	if len(vmis) > 0 {
 		vmiNameList := make([]string, 0, len(vmis))
 		for _, vmi := range vmis {
-			vmiNameList = append(vmiNameList, vmi.Namespace+"/"+vmi.Name)
+			vmiNameList = append(vmiNameList, vmi)
 		}
-		return fmt.Errorf("it's still used by VM(s) %s which must be stopped at first", strings.Join(vmiNameList, ", "))
+		return fmt.Errorf("it is still used by VM(s) %s", strings.Join(vmiNameList, ", "))
 	}
 
 	return nil
diff --git a/pkg/webhook/vlanconfig/validator.go b/pkg/webhook/vlanconfig/validator.go
index b8f284ad8..1edd01efb 100644
--- a/pkg/webhook/vlanconfig/validator.go
+++ b/pkg/webhook/vlanconfig/validator.go
@@ -14,7 +14,6 @@ import (
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
-	kubevirtv1 "kubevirt.io/api/core/v1"
 
 	"github.com/harvester/harvester/pkg/util"
@@ -137,15 +136,6 @@ func getAffectedNodes(oldCn, newCn string, oldNodes, newNodes mapset.Set[string]
 func (v *Validator) Delete(_ *admission.Request, oldObj runtime.Object) error {
 	vc := oldObj.(*networkv1.VlanConfig)
 
-	nodes, err := getMatchNodes(vc)
-	if err != nil {
-		return fmt.Errorf(deleteErr, vc.Name, err)
-	}
-
-	if err := v.checkVmi(vc, nodes); err != nil {
-		return fmt.Errorf(deleteErr, vc.Name, err)
-	}
-
 	nads, err := v.nadCache.List(util.HarvesterSystemNamespaceName, labels.Set(map[string]string{
 		utils.KeyClusterNetworkLabel: vc.Spec.ClusterNetwork,
 	}).AsSelector())
@@ -154,10 +144,14 @@ func (v *Validator) Delete(_ *admission.Request, oldObj runtime.Object) error {
 	if err != nil {
 		return fmt.Errorf(deleteErr, vc.Name, err)
 	}
 	if len(nads) > 0 {
+		nadStrList := make([]string, 0, len(nads))
 		for _, nad := range nads {
-			if nad.DeletionTimestamp == nil && nad.Annotations[StorageNetworkAnnotation] == "true" {
-				return fmt.Errorf(deleteErr, vc.Name, fmt.Errorf(`storage network nad %s is still attached`, nad.Name))
+			if nad.DeletionTimestamp == nil {
+				nadStrList = append(nadStrList, nad.Name)
 			}
 		}
+		if len(nadStrList) > 0 {
+			return fmt.Errorf(deleteErr, vc.Name, fmt.Errorf(`delete attached nad(s) %s first`, strings.Join(nadStrList, ", ")))
+		}
 	}
@@ -218,20 +212,16 @@ func (v *Validator) checkVmi(vc *networkv1.VlanConfig, nodes mapset.Set[string])
 	}
 
 	vmiGetter := utils.VmiGetter{VmiCache: v.vmiCache}
-	vmis := make([]*kubevirtv1.VirtualMachineInstance, 0)
+	vmiStrList := make([]string, 0)
 	for _, nad := range nads {
 		vmisTemp, err := vmiGetter.WhoUseNad(nad, nodes)
 		if err != nil {
 			return err
 		}
-		vmis = append(vmis, vmisTemp...)
-	}
-
-	if len(vmis) > 0 {
-		vmiStrList := make([]string, len(vmis))
-		for i, vmi := range vmis {
-			vmiStrList[i] = vmi.Namespace + "/" + vmi.Name
-		}
+		vmiStrList = append(vmiStrList, vmisTemp...)
+	}
+
+	if len(vmiStrList) > 0 {
 		return fmt.Errorf("it's blocked by VM(s) %s which must be stopped at first", strings.Join(vmiStrList, ", "))
 	}
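
Note: the change relies on a VM-by-network indexer (the vendored indexeres.VMByNetwork registered in cmd/webhook/main.go above). As a rough sketch of what such an indexer does — assuming it mirrors the local vmiByNetwork helper; the package and function names below are illustrative, not the vendored implementation:

    package sketch

    import (
    	kubevirtv1 "kubevirt.io/api/core/v1"
    )

    // vmByNetworkSketch maps a VirtualMachine to the multus network names referenced by
    // its spec template. A stopped VM has no VMI, but its spec still lists these networks,
    // which is why indexing the VM cache keeps the NAD reference visible after shutdown.
    func vmByNetworkSketch(obj *kubevirtv1.VirtualMachine) ([]string, error) {
    	if obj.Spec.Template == nil {
    		return nil, nil
    	}
    	networks := obj.Spec.Template.Spec.Networks
    	networkNameList := make([]string, 0, len(networks))
    	for _, network := range networks {
    		if network.NetworkSource.Multus == nil || network.NetworkSource.Multus.NetworkName == "" {
    			continue
    		}
    		networkNameList = append(networkNameList, network.NetworkSource.Multus.NetworkName)
    	}
    	return networkNameList, nil
    }

The multus network name may be either "<network name>" or "<namespace>/<network name>", which is why WhoUseNad queries the index with both forms.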