diff --git a/go.mod b/go.mod
index 5e63881ffc..487198fed6 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1
 	github.com/evanphx/json-patch/v5 v5.6.0
 	github.com/fsnotify/fsnotify v1.7.0
+	github.com/go-logr/zapr v1.2.4
 	github.com/golang/protobuf v1.5.4
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-version v1.6.0
diff --git a/go.sum b/go.sum
index 9622e3860c..c3c7a6ace7 100644
--- a/go.sum
+++ b/go.sum
@@ -88,6 +88,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -675,12 +676,16 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8
 go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
 go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
 go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
 go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
diff --git a/pkg/common/cns-lib/vsphere/utils.go b/pkg/common/cns-lib/vsphere/utils.go
index 673a3a210c..c9180d14d3 100644
--- a/pkg/common/cns-lib/vsphere/utils.go
+++ b/pkg/common/cns-lib/vsphere/utils.go
@@ -455,7 +455,7 @@ func GetCandidateDatastoresInCluster(ctx context.Context, vc *VirtualCenter, clu
 	if len(sharedDatastores) == 0 && len(vsanDirectDatastores) == 0 {
 		return nil, nil, fmt.Errorf("no candidates datastores found in the Kubernetes cluster")
 	}
-	log.Infof("Found shared datastores: %+v and vSAN Direct datastores: %+v", sharedDatastores,
+	log.Debugf("Found shared datastores: %+v and vSAN Direct datastores: %+v", sharedDatastores,
 		vsanDirectDatastores)
 	return sharedDatastores, vsanDirectDatastores, nil
 }
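Reviewer note on the lone new dependency: go-logr/zapr bridges controller-runtime's logr-based logging onto the zap logger this driver already uses; the actual wiring lands in InitMetadataSyncer further down (`cr_log.SetLogger(zapr.NewLogger(log.Desugar()))`). A minimal, self-contained sketch of that same bridge, using a throwaway zap.NewProduction() logger in place of the driver's sugared logger:

```go
package main

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	cr_log "sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	// The driver reuses its existing sugared logger via log.Desugar();
	// any *zap.Logger works here.
	zapLog, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	// zapr adapts *zap.Logger to the logr.Logger interface that
	// controller-runtime expects. Without SetLogger, recent
	// controller-runtime versions complain that no logger was configured.
	cr_log.SetLogger(zapr.NewLogger(zapLog))
}
```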
diff --git a/pkg/csi/service/common/authmanager.go b/pkg/csi/service/common/authmanager.go
index bd195462c9..e507f4a6e7 100644
--- a/pkg/csi/service/common/authmanager.go
+++ b/pkg/csi/service/common/authmanager.go
@@ -180,7 +180,7 @@ func (authManager *AuthManager) refreshFSEnabledClustersToDsMap() {
 		defer authManager.rwMutex.Unlock()
 		authManager.fsEnabledClusterToDsMap = newFsEnabledClusterToDsMap
-		log.Infof("auth manager: newFsEnabledClusterToDsMap is updated to %v for vCenter %q",
+		log.Debugf("auth manager: newFsEnabledClusterToDsMap is updated to %v for vCenter %q",
 			newFsEnabledClusterToDsMap, vcenterHost)
 	} else {
 		log.Warnf("auth manager: failed to get updated datastoreMapForFileVolumes for vCenter %q, Err: %v",
@@ -502,7 +502,7 @@ func getFSEnabledClustersWithPriv(ctx context.Context, vc *cnsvsphere.VirtualCen
 			log.Debugf("vSAN file service is enabled for cluster: %+v and vCenter: %q.",
 				cluster, vc.Config.Host)
 		} else {
-			log.Infof("vSAN file service is disabled for cluster: %+v and vCenter: %q.",
+			log.Debugf("vSAN file service is disabled for cluster: %+v and vCenter: %q.",
 				cluster, vc.Config.Host)
 		}
 	}
diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go
index 5a8805f0be..a27671edea 100644
--- a/pkg/kubernetes/kubernetes.go
+++ b/pkg/kubernetes/kubernetes.go
@@ -72,14 +72,14 @@ func GetKubeConfig(ctx context.Context) (*restclient.Config, error) {
 	var err error
 	kubecfgPath := getKubeConfigPath(ctx)
 	if kubecfgPath != "" {
-		log.Infof("k8s client using kubeconfig from %s", kubecfgPath)
+		log.Debugf("k8s client using kubeconfig from %s", kubecfgPath)
 		config, err = clientcmd.BuildConfigFromFlags("", kubecfgPath)
 		if err != nil {
 			log.Errorf("BuildConfigFromFlags failed %v", err)
 			return nil, err
 		}
 	} else {
-		log.Info("k8s client using in-cluster config")
+		log.Debug("k8s client using in-cluster config")
 		config, err = restclient.InClusterConfig()
 		if err != nil {
 			log.Errorf("InClusterConfig failed %v", err)
@@ -102,24 +102,24 @@ func getKubeConfigPath(ctx context.Context) string {
 		flagValue := kubecfgFlag.Value.String()
 		if flagValue != "" {
 			kubecfgPath = flagValue
-			log.Infof("Kubeconfig path obtained from kubeconfig flag: %q", kubecfgPath)
+			log.Debugf("Kubeconfig path obtained from kubeconfig flag: %q", kubecfgPath)
 		} else {
-			log.Info("Kubeconfig flag is set but empty, checking environment variable value")
+			log.Debug("Kubeconfig flag is set but empty, checking environment variable value")
 		}
 	} else {
-		log.Info("Kubeconfig flag not set, checking environment variable value")
+		log.Debug("Kubeconfig flag not set, checking environment variable value")
 	}
 	if kubecfgPath == "" {
 		// Get the Kubeconfig path from the environment variable
 		kubecfgPath = os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
-		log.Infof("Kubeconfig path obtained from environment variable %q: %q",
+		log.Debugf("Kubeconfig path obtained from environment variable %q: %q",
 			clientcmd.RecommendedConfigPathEnvVar, kubecfgPath)
 	}
 	// Final logging of the Kubeconfig path used
 	if kubecfgPath == "" {
-		log.Info("No Kubeconfig path found, either from environment variable or flag")
+		log.Debug("No Kubeconfig path found, either from environment variable or flag")
 	} else {
-		log.Infof("Final Kubeconfig path used: %q", kubecfgPath)
+		log.Debugf("Final Kubeconfig path used: %q", kubecfgPath)
 	}
 	return kubecfgPath
 }
@@ -431,7 +431,7 @@ func getClientThroughput(ctx context.Context, isSupervisorClient bool) (float32,
 			burst = value
 		}
 	}
-	log.Infof("Setting client QPS to %f and Burst to %d.", qps, burst)
+	log.Debugf("Setting client QPS to %f and Burst to %d.", qps, burst)
 	return qps, burst
 }
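The kubernetes.go hunks above only demote routine messages from Info to Debug, but they trace out the kubeconfig resolution order worth keeping in mind while reading: an explicit kubeconfig flag wins, then the KUBECONFIG environment variable, and GetKubeConfig finally falls back to in-cluster config. A condensed sketch of that chain using client-go (resolveConfig is an illustrative name, not a function in this repo):

```go
package main

import (
	"flag"
	"fmt"
	"os"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// resolveConfig condenses GetKubeConfig's logic: flag value first,
// then $KUBECONFIG, else the in-cluster service account config.
func resolveConfig() (*rest.Config, error) {
	path := ""
	if f := flag.Lookup("kubeconfig"); f != nil {
		path = f.Value.String()
	}
	if path == "" {
		// clientcmd.RecommendedConfigPathEnvVar is "KUBECONFIG".
		path = os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
	}
	if path != "" {
		return clientcmd.BuildConfigFromFlags("", path)
	}
	return rest.InClusterConfig()
}

func main() {
	if _, err := resolveConfig(); err != nil {
		fmt.Println("no usable config:", err)
	}
}
```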
diff --git a/pkg/syncer/fullsync.go b/pkg/syncer/fullsync.go
index 871f6623ab..d566f14b55 100644
--- a/pkg/syncer/fullsync.go
+++ b/pkg/syncer/fullsync.go
@@ -25,6 +25,8 @@ import (
 	"sync"
 	"time"
 
+	"k8s.io/apimachinery/pkg/api/resource"
+
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/vmware/govmomi/cns"
@@ -314,12 +316,7 @@ func CsiFullSync(ctx context.Context, metadataSyncer *metadataSyncInformer, vc s
 			cnsVolumeMap[vol.VolumeId.Id] = vol
 		}
 		log.Infof("calling validateAndCorrectVolumeInfoSnapshotDetails with %d volumes", len(cnsVolumeMap))
-		err = validateAndCorrectVolumeInfoSnapshotDetails(ctx, cnsVolumeMap)
-		if err != nil {
-			log.Errorf("FullSync for VC %s: Error while sync CNSVolumeinfo snapshot details, failed with err=%+v",
-				vc, err.Error())
-			return err
-		}
+		validateAndCorrectVolumeInfoSnapshotDetails(ctx, cnsVolumeMap)
 	}
 	vcHostObj, vcHostObjFound := metadataSyncer.configInfo.Cfg.VirtualCenter[vc]
 	if !vcHostObjFound {
@@ -767,9 +764,11 @@ func volumeInfoCRFullSync(ctx context.Context, metadataSyncer *metadataSyncInfor
 // in cns with the size in cnsvolumeinfo. if found discrepancy in order to correct the values
 // update the cnsvolumeinfo.
 func validateAndCorrectVolumeInfoSnapshotDetails(ctx context.Context,
-	cnsVolumeMap map[string]cnstypes.CnsVolume) error {
+	cnsVolumeMap map[string]cnstypes.CnsVolume) {
 	log := logger.GetLogger(ctx)
 	volumeInfoCRList := volumeInfoService.ListAllVolumeInfos()
+	alreadySyncedVolumeCount := 0
+	outOfSyncVolumeCount := 0
 	for _, volumeInfo := range volumeInfoCRList {
 		cnsvolumeinfo := &cnsvolumeinfov1alpha1.CNSVolumeInfo{}
 		err := runtime.DefaultUnstructuredConverter.FromUnstructured(volumeInfo.(*unstructured.Unstructured).Object,
@@ -779,54 +778,75 @@ func validateAndCorrectVolumeInfoSnapshotDetails(ctx context.Context,
 			continue
 		}
 		if cnsVol, ok := cnsVolumeMap[cnsvolumeinfo.Spec.VolumeID]; ok {
-			log.Infof("validate volume info for storage details for volume %s", cnsVol.VolumeId.Id)
-			var aggregatedSnapshotCapacity int64
+			log.Debugf("validate volume info for storage details for volume %s", cnsVol.VolumeId.Id)
+			var aggregatedSnapshotCapacityInMB int64
 			if cnsVol.BackingObjectDetails != nil &&
 				cnsVol.BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails) != nil {
 				val, ok := cnsVol.BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails)
 				if ok {
-					aggregatedSnapshotCapacity = val.AggregatedSnapshotCapacityInMb
+					aggregatedSnapshotCapacityInMB = val.AggregatedSnapshotCapacityInMb
 				}
-				log.Infof("Received aggregatedSnapshotCapacity %d for volume %q",
-					aggregatedSnapshotCapacity, cnsVol.VolumeId.Id)
-				if cnsvolumeinfo.Spec.AggregatedSnapshotSize == nil || aggregatedSnapshotCapacity !=
-					cnsvolumeinfo.Spec.AggregatedSnapshotSize.Value() {
-					// use current time as snapshot completion time is not available in fullsync.
-					log.Infof("Update aggregatedSnapshotCapacity for volume %q", cnsVol.VolumeId.Id)
-					currentTime := time.Now()
-					cnsSnapInfo := &volumes.CnsSnapshotInfo{
-						SourceVolumeID:                      cnsvolumeinfo.Spec.VolumeID,
-						SnapshotLatestOperationCompleteTime: time.Now(),
-						AggregatedSnapshotCapacityInMb:      aggregatedSnapshotCapacity,
-					}
-					log.Infof("unable to get snapshot operation completion time for volumeID %q "+
-						"will use current time %v instead", cnsvolumeinfo.Spec.VolumeID, currentTime)
-					patch, err := common.GetValidatedCNSVolumeInfoPatch(ctx, cnsSnapInfo)
-					if err != nil {
Error: %+v", - cnsvolumeinfo.Spec.VolumeID, err) - return err - } - patchBytes, err := json.Marshal(patch) - if err != nil { - log.Errorf("error while create VolumeInfo patch for volume %q. Error while marshaling: %+v", - cnsvolumeinfo.Spec.VolumeID, err) - return err - } - err = volumeInfoService.PatchVolumeInfo(ctx, cnsvolumeinfo.Spec.VolumeID, patchBytes, - allowedRetriesToPatchCNSVolumeInfo) - if err != nil { - log.Errorf("failed to patch CNSVolumeInfo instance to update snapshot details."+ - "for volume %q. Error: %+v", cnsvolumeinfo.Spec.VolumeID, err) - return err + log.Debugf("Received aggregatedSnapshotCapacity %dMB for volume %s from CNS", + aggregatedSnapshotCapacityInMB, cnsVol.VolumeId.Id) + if cnsvolumeinfo.Spec.AggregatedSnapshotSize != nil && cnsvolumeinfo.Spec.ValidAggregatedSnapshotSize { + aggregatedSnapshotCapacity := resource.NewQuantity(aggregatedSnapshotCapacityInMB*common.MbInBytes, + resource.BinarySI) + if aggregatedSnapshotCapacity.Value() == cnsvolumeinfo.Spec.AggregatedSnapshotSize.Value() { + log.Debugf("volume %s Aggregated Snapshot capacity %s in CnsVolumeInfo is in sync with CNS", + cnsVol.VolumeId.Id, aggregatedSnapshotCapacity.String()) + alreadySyncedVolumeCount++ + continue } - log.Infof("Updated CNSvolumeInfo with Snapshot details successfully for volume %q", - cnsvolumeinfo.Spec.VolumeID) + log.Infof("Aggregated Snapshot size mismatch for volume %s, %s in CnsVolumeInfo and %dMB in CNS", + cnsVol.VolumeId.Id, cnsvolumeinfo.Spec.AggregatedSnapshotSize.String(), + aggregatedSnapshotCapacityInMB) + } + // Nothing to do if it's invalid in CNS and CNSVolumeInfo + if !cnsvolumeinfo.Spec.ValidAggregatedSnapshotSize && aggregatedSnapshotCapacityInMB == -1 { + log.Infof("volume %s aggregated snapshot capacity not present in CNS and CNSVolumeInfo", + cnsVol.VolumeId.Id) + continue + } + // implies the cnsvolumeinfo has capacity mismatch with CNS queryVolume result OR + // existing cnsvolumeinfo aggregated snapshot is not set or invalid + log.Infof("Update aggregatedSnapshotCapacity for volume %s to %dMB", + cnsVol.VolumeId.Id, aggregatedSnapshotCapacityInMB) + // use current time as snapshot completion time is not available in fullsync. + currentTime := time.Now() + cnsSnapInfo := &volumes.CnsSnapshotInfo{ + SourceVolumeID: cnsvolumeinfo.Spec.VolumeID, + SnapshotLatestOperationCompleteTime: time.Now(), + AggregatedSnapshotCapacityInMb: aggregatedSnapshotCapacityInMB, + } + log.Infof("snapshot operation completion time unavailable for volumeID %s, will"+ + " use current time %v instead", cnsvolumeinfo.Spec.VolumeID, currentTime.String()) + patch, err := common.GetValidatedCNSVolumeInfoPatch(ctx, cnsSnapInfo) + if err != nil { + log.Errorf("unable to get VolumeInfo patch for %s. Error: %+v. Continuing..", + cnsvolumeinfo.Spec.VolumeID, err) + continue + } + patchBytes, err := json.Marshal(patch) + if err != nil { + log.Errorf("error while create VolumeInfo patch for volume %s. Error while marshaling: %+v. Continuing..", + cnsvolumeinfo.Spec.VolumeID, err) + continue + } + err = volumeInfoService.PatchVolumeInfo(ctx, cnsvolumeinfo.Spec.VolumeID, patchBytes, + allowedRetriesToPatchCNSVolumeInfo) + if err != nil { + log.Errorf("failed to patch CNSVolumeInfo instance to update snapshot details."+ + "for volume %s. Error: %+v. 
Continuing..", cnsvolumeinfo.Spec.VolumeID, err) + continue } + log.Infof("Updated CNSvolumeInfo with Snapshot details successfully for volume %s", + cnsvolumeinfo.Spec.VolumeID) + outOfSyncVolumeCount++ } } } - return nil + log.Infof("Number of volumes with synced aggregated snapshot size with CNS %d", alreadySyncedVolumeCount) + log.Infof("Number of volumes with out-of-sync aggregated snapshot size with CNS %d", outOfSyncVolumeCount) } // fullSyncCreateVolumes creates volumes with given array of createSpec. @@ -1214,7 +1234,7 @@ func fullSyncGetVolumeSpecs(ctx context.Context, vCenterVersion string, pvList [ log.Infof("FullSync for VC %s: update is required for volume: %q", vc, volumeHandle) operationType = "updateVolume" } else { - log.Infof("FullSync for VC %s: update is not required for volume: %q", vc, volumeHandle) + log.Debugf("FullSync for VC %s: update is not required for volume: %q", vc, volumeHandle) } } switch operationType { diff --git a/pkg/syncer/metadatasyncer.go b/pkg/syncer/metadatasyncer.go index 2e2fbf192b..e94df4d92f 100644 --- a/pkg/syncer/metadatasyncer.go +++ b/pkg/syncer/metadatasyncer.go @@ -27,6 +27,8 @@ import ( "sync" "time" + "sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer/storagepool" + "github.com/davecgh/go-spew/spew" "github.com/fsnotify/fsnotify" cnstypes "github.com/vmware/govmomi/cns/types" @@ -46,6 +48,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/go-logr/zapr" + cr_log "sigs.k8s.io/controller-runtime/pkg/log" cnsoperatorv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/migration" @@ -69,7 +73,6 @@ import ( csinodetopologyv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/csinodetopology/v1alpha1" "sigs.k8s.io/vsphere-csi-driver/v3/pkg/internalapis/featurestates" k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes" - "sigs.k8s.io/vsphere-csi-driver/v3/pkg/syncer/storagepool" ) var ( @@ -204,6 +207,7 @@ func getPVtoBackingDiskObjectIdIntervalInMin(ctx context.Context) int { func InitMetadataSyncer(ctx context.Context, clusterFlavor cnstypes.CnsClusterFlavor, configInfo *cnsconfig.ConfigurationInfo) error { log := logger.GetLogger(ctx) + cr_log.SetLogger(zapr.NewLogger(log.Desugar())) var err error log.Infof("Initializing MetadataSyncer") metadataSyncer := newInformer() @@ -938,7 +942,7 @@ func cnsvolumeoperationrequestCRAdded(obj interface{}) { cnsvolumeoperationrequest.CRDSingular, err) return } - log.Infof("cnsvolumeoperationrequestCRAdded: Received a CR added event for cnsvolumeoperationrequestObj %v", + log.Debugf("cnsvolumeoperationrequestCRAdded: Received a CR added event for cnsvolumeoperationrequestObj %+v", cnsvolumeoperationrequestObj) // Check for the below set of conditions: // 1. Cnsvolumeoperationrequest object's StorageQuotaDetails should not be nil @@ -950,23 +954,24 @@ func cnsvolumeoperationrequestCRAdded(obj interface{}) { // For CRs mapping to Volumes which are in Bound state, the Reserved Value will be zero. // So, we can safely skip the CRAdded event. if cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value() == 0 { - log.Infof("cnsvolumeoperationrequestCRAdded: Received a cnsvolumeoperationrequest CR added event "+ + log.Debugf("cnsvolumeoperationrequestCRAdded: Received a cnsvolumeoperationrequest CR added event "+ "for %q, with the reserved capacity as zero(0). 
 			"for %q, with the reserved capacity as zero(0). Skipping the informer event",
 			cnsvolumeoperationrequestObj.Name)
 		return
 	}
-	isSnapshot := checkOperationRequestCRForSnapshot(ctx, cnsvolumeoperationrequestObj.Name)
+	cnsVolumeOperationRequestName := cnsvolumeoperationrequestObj.Name
+	isSnapshot := checkOperationRequestCRForSnapshot(ctx, cnsVolumeOperationRequestName)
 	storagePolicyUsageInstanceName := ""
 	if isSnapshot {
 		// Fetch StoragePolicyUsage instance for storageClass associated with the snapshot.
-		log.Infof("Received a CR added event for snapshot operation with snapshotID %q",
-			cnsvolumeoperationrequestObj.Status.SnapshotID)
+		log.Infof("Received a CR added event for snapshot operation on volumeID: %s CnsVolumeOperationRequest: %s",
+			cnsvolumeoperationrequestObj.Status.VolumeID, cnsVolumeOperationRequestName)
 		storagePolicyUsageInstanceName = cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.
 			StorageClassName + "-" + storagepolicyv1alpha2.NameSuffixForSnapshot
 	} else {
 		// Fetch StoragePolicyUsage instance for storageClass associated with the volume.
-		log.Infof("Received a CR added event for pvc operation with volumeID %q",
-			cnsvolumeoperationrequestObj.Status.VolumeID)
+		log.Infof("Received a CR added event for pvc operation with volumeID: %s CnsVolumeOperationRequest: %s",
+			cnsvolumeoperationrequestObj.Status.VolumeID, cnsVolumeOperationRequestName)
 		storagePolicyUsageInstanceName = cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.
 			StorageClassName + "-" + storagepolicyv1alpha2.NameSuffixForPVC
 	}
@@ -1004,9 +1009,10 @@ func cnsvolumeoperationrequestCRAdded(obj interface{}) {
 			log.Errorf("updateStoragePolicyUsage failed. err: %v", err)
 			return
 		}
-		log.Infof("cnsvolumeoperationrequestCRAdded: Successfully increased the reserved field by %v bytes "+
-			"for storagepolicyusage CR: %q", cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(),
-			patchedStoragePolicyUsageCR.Name)
+		log.Infof("cnsvolumeoperationrequestCRAdded: Successfully increased the reserved field by %s "+
+			"for storagepolicyusage CR: %s for CnsVolumeOperationRequest: %s",
+			cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+			patchedStoragePolicyUsageCR.Name, cnsVolumeOperationRequestName)
 		return
 	} else {
 		// This is a case where, the storagePolicyUsage CR does not have Status field.
@@ -1029,8 +1035,8 @@ func cnsvolumeoperationrequestCRAdded(obj interface{}) {
 			log.Errorf("updateStoragePolicyUsage failed. err: %v", err)
 			return
 		}
-		log.Infof("cnsvolumeoperationrequestCRAdded: Successfully increased the reserved field by %v bytes "+
-			"for storagepolicyusage CR: %q", cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(),
+		log.Infof("cnsvolumeoperationrequestCRAdded: Successfully increased the reserved field by %s "+
+			"for storagepolicyusage CR: %q", cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
 			patchedStoragePolicyUsageCR.Name)
 		return
 	}
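A pattern repeated throughout these hunks: log statements switch from Reserved.Value() with a hand-written "bytes" suffix to Reserved.String(). Value() yields the raw byte count, while String() renders the quantity in its canonical suffixed form, which reads far better in logs:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	reserved := resource.MustParse("5Gi")
	fmt.Printf("by %v bytes\n", reserved.Value()) // by 5368709120 bytes
	fmt.Printf("by %s\n", reserved.String())      // by 5Gi
}
```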
log.Infof("cnsvolumeoperationrequestCRDeleted: Delete event receieved for snapshot operation "+ - "with snapshoID %q ", cnsvolumeoperationrequestObj.Status.SnapshotID) + "with snapshoID %s CnsVolumeOperationRequest: %s", + cnsvolumeoperationrequestObj.Status.SnapshotID, cnsVolumeOperationRequestName) storagePolicyUsageInstanceName = cnsvolumeoperationrequestObj.Status.StorageQuotaDetails. StorageClassName + "-" + storagepolicyv1alpha2.NameSuffixForSnapshot } else { + log.Infof("cnsvolumeoperationrequestCRDeleted: Delete event receieved for PVC operation "+ + "with volumeID %s CnsVolumeOperationRequest: %s", + cnsvolumeoperationrequestObj.Status.VolumeID, cnsVolumeOperationRequestName) // Fetch StoragePolicyUsage instance for storageClass associated with the volume. storagePolicyUsageInstanceName = cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.StorageClassName + "-" + storagepolicyv1alpha2.NameSuffixForPVC @@ -1081,7 +1092,7 @@ func cnsvolumeoperationrequestCRDeleted(obj interface{}) { Name: storagePolicyUsageInstanceName}, storagePolicyUsageCR) if err != nil { - log.Errorf("failed to fetch %s instance with name %q from supervisor namespace %q. Error: %+v", + log.Errorf("failed to fetch %s instance with name %s from supervisor namespace %s. Error: %+v", storagepolicyv1alpha2.CRDSingular, storagePolicyUsageInstanceName, cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Namespace, err) return @@ -1098,9 +1109,10 @@ func cnsvolumeoperationrequestCRDeleted(obj interface{}) { log.Errorf("updateStoragePolicyUsage failed. err: %v", err) return } - log.Infof("cnsvolumeoperationrequestCRDeleted: Successfully decreased the reserved field by %v bytes "+ - "for storagepolicyusage CR: %q", cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(), - patchedStoragePolicyUsageCR.Name) + log.Infof("cnsvolumeoperationrequestCRDeleted: Successfully decreased the reserved field by %s "+ + "for storagepolicyusage CR: %s CnsVolumeOperationRequest: %s", + cnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(), + patchedStoragePolicyUsageCR.Name, cnsVolumeOperationRequestName) } } @@ -1149,17 +1161,19 @@ func cnsvolumeoperationrequestCRUpdated(oldObj interface{}, newObj interface{}) log.Errorf("Failed to create CnsOperator client. Err: %+v", err) return } - - isSnapshot := checkOperationRequestCRForSnapshot(ctx, newcnsvolumeoperationrequestObj.Name) + cnsVolumeOperationRequestName := newcnsvolumeoperationrequestObj.Name + isSnapshot := checkOperationRequestCRForSnapshot(ctx, cnsVolumeOperationRequestName) storagePolicyUsageInstanceName := "" if isSnapshot { - log.Infof("Update event receieved for snapshot operation with snapshoID %q ", - newcnsvolumeoperationrequestObj.Status.SnapshotID) + log.Infof("Update event receieved for snapshot operation on volumeID %s CnsVolumeOperationRequest: %s", + newcnsvolumeoperationrequestObj.Status.VolumeID, cnsVolumeOperationRequestName) // Fetch StoragePolicyUsage instance for storageClass associated with the snapshot. storagePolicyUsageInstanceName = newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails. StorageClassName + "-" + storagepolicyv1alpha2.NameSuffixForSnapshot } else { + log.Infof("Update event receieved for volume operation on volumeID %s CnsVolumeOperationRequest: %s", + newcnsvolumeoperationrequestObj.Status.VolumeID, cnsVolumeOperationRequestName) // Fetch StoragePolicyUsage instance for storageClass associated with the volume. 
 		storagePolicyUsageInstanceName = newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.StorageClassName +
 			"-" + storagepolicyv1alpha2.NameSuffixForPVC
@@ -1171,7 +1185,7 @@ func cnsvolumeoperationrequestCRUpdated(oldObj interface{}, newObj interface{})
 			Name: storagePolicyUsageInstanceName},
 		storagePolicyUsageCR)
 	if err != nil {
-		log.Errorf("failed to fetch %s instance with name %q from supervisor namespace %q. Error: %+v",
+		log.Errorf("failed to fetch %s instance with name %s from supervisor namespace %s. Error: %+v",
 			storagepolicyv1alpha2.CRDSingular, storagePolicyUsageInstanceName,
 			newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Namespace, err)
 		return
@@ -1188,7 +1202,8 @@ func cnsvolumeoperationrequestCRUpdated(oldObj interface{}, newObj interface{})
 	// CnsVolumeOperationRequest "reserved" field
 	if storagePolicyUsageCR.Status.ResourceTypeLevelQuotaUsage != nil {
 		// Move forward only if StoragePolicyUsage CR has Status.QuotaUsage fields not nil
-		log.Infof("Increase the reserved value in storagePolicyUsage CR %s", storagePolicyUsageCR.Name)
+		log.Infof("Increase the reserved value in storagePolicyUsage CR %s by %d",
+			storagePolicyUsageCR.Name, newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value())
 		patchedStoragePolicyUsageCR.Status.ResourceTypeLevelQuotaUsage.Reserved.Add(
 			*newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved)
 		err := PatchStoragePolicyUsage(ctx, cnsOperatorClient, storagePolicyUsageCR,
@@ -1197,9 +1212,10 @@ func cnsvolumeoperationrequestCRUpdated(oldObj interface{}, newObj interface{})
 			log.Errorf("updateStoragePolicyUsage failed. err: %v", err)
 			return
 		}
-		log.Infof("cnsvolumeoperationrequestCRUpdated: Successfully increased the reserved field by %v bytes "+
-			"for storagepolicyusage CR: %q", newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(),
-			patchedStoragePolicyUsageCR.Name)
+		log.Infof("cnsvolumeoperationrequestCRUpdated: Successfully increased the reserved field by %s "+
+			"for storagepolicyusage CR: %s CnsVolumeOperationRequest: %s",
+			newcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+			patchedStoragePolicyUsageCR.Name, cnsVolumeOperationRequestName)
 	}
 } else {
 	// This is a case where CSI Driver container decreases the value of "reserved" value in
@@ -1208,42 +1224,51 @@ func cnsvolumeoperationrequestCRUpdated(oldObj interface{}, newObj interface{})
 		// needs to be decreased and the "used" field needs to be increased in StoragePolicyUsage CR.
 		// 2. when the latest CNS task tracked by the CNSVolumeOperationRequest errors out: in this case,
 		// just the "reserved" field in StoragePolicyUsage needs to be decreased.
-		log.Infof("Decrease the reserved value in storagePolicyUsage CR %s", storagePolicyUsageCR.Name)
+		log.Infof("Decrease the reserved value in storagePolicyUsage CR %s by %s CnsVolumeOperationRequest: %s",
+			storagePolicyUsageCR.Name, oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+			cnsVolumeOperationRequestName)
 		patchedStoragePolicyUsageCR.Status.ResourceTypeLevelQuotaUsage.Reserved.Sub(
 			*oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved)
 		var increaseUsed bool
-		log.Info("skip increase `used` capacity, for snapshot operation cnsvolumeinfo informer will increase it")
 		if !isSnapshot {
 			// Fetch the latest task of the CNSVolumeOperationRequest instance and increment
 			// "used" only when the task is successful.
 			latestOps := newcnsvolumeoperationrequestObj.Status.LatestOperationDetails
 			latestOp := latestOps[len(latestOps)-1]
-			log.Infof("Decrease the used value in storagePolicyUsage CR %s", storagePolicyUsageCR.Name)
 			if latestOp.TaskStatus == cnsvolumeoperationrequest.TaskInvocationStatusSuccess {
-				log.Debugf("Latest task %q in %s instance %q succeeded. Incrementing \"used\" "+
+				log.Debugf("Latest task %s in %s instance %s succeeded. Incrementing \"used\" "+
 					"field in storagepolicyusage CR", latestOp.TaskID,
 					cnsvolumeoperationrequest.CRDSingular, newcnsvolumeoperationrequestObj.Name)
+				log.Infof("Increase the used value in storagePolicyUsage CR %s by %s "+
+					"since the operation was successful CnsVolumeOperationRequest: %s",
+					storagePolicyUsageCR.Name,
+					oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+					cnsVolumeOperationRequestName)
 				patchedStoragePolicyUsageCR.Status.ResourceTypeLevelQuotaUsage.Used.Add(
 					*oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved)
 				increaseUsed = true
 			}
+		} else {
+			log.Debug("skip increase `used` capacity, for snapshot operation cnsvolumeinfo informer will increase it")
 		}
 		err := PatchStoragePolicyUsage(ctx, cnsOperatorClient, storagePolicyUsageCR,
 			patchedStoragePolicyUsageCR)
 		if err != nil {
-			log.Errorf("patching operation failed for StoragePolicyUsage CR: %q in namespace: %q. err: %v",
+			log.Errorf("patching operation failed for StoragePolicyUsage CR: %s in namespace: %s. err: %v",
 				patchedStoragePolicyUsageCR.Name, patchedStoragePolicyUsageCR.Namespace, err)
 			return
 		}
 		log.Infof("cnsvolumeoperationrequestCRUpdated: Successfully decreased the reserved field "+
-			"by %v bytes for storagepolicyusage CR: %q in namespace: %q",
-			oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(),
-			patchedStoragePolicyUsageCR.Name, patchedStoragePolicyUsageCR.Namespace)
+			"by %s for storagepolicyusage CR: %s in namespace: %s CnsVolumeOperationRequest: %s",
+			oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+			patchedStoragePolicyUsageCR.Name, patchedStoragePolicyUsageCR.Namespace,
+			cnsVolumeOperationRequestName)
 		if increaseUsed {
 			log.Infof("cnsvolumeoperationrequestCRUpdated: Successfully increased the used field "+
-				"by %v bytes for storagepolicyusage CR: %q in namespace: %q",
-				oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.Value(),
-				patchedStoragePolicyUsageCR.Name, patchedStoragePolicyUsageCR.Namespace)
+				"by %s for storagepolicyusage CR: %s in namespace: %s CnsVolumeOperationRequest: %s",
+				oldcnsvolumeoperationrequestObj.Status.StorageQuotaDetails.Reserved.String(),
+				patchedStoragePolicyUsageCR.Name, patchedStoragePolicyUsageCR.Namespace,
+				cnsVolumeOperationRequestName)
 		}
 	}
 }
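The update handler above implements a two-phase quota move: the old Reserved amount is always subtracted from the storagepolicyusage CR, and the same amount is added to Used only when the latest tracked CNS task succeeded (snapshot operations are skipped because the cnsvolumeinfo informer accounts for them). A sketch of the Quantity arithmetic under those semantics; note that Add and Sub mutate their receiver:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	reserved := resource.MustParse("10Gi") // quota held while the operation runs
	used := resource.MustParse("40Gi")     // quota already consumed
	delta := resource.MustParse("10Gi")    // the CR's StorageQuotaDetails.Reserved

	taskSucceeded := true

	// Always release the reservation once the operation settles.
	reserved.Sub(delta)
	// Move it into "used" only if the CNS task actually succeeded;
	// on failure the capacity simply becomes available again.
	if taskSucceeded {
		used.Add(delta)
	}
	fmt.Printf("reserved=%s used=%s\n", reserved.String(), used.String())
}
```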
@@ -3292,9 +3317,9 @@ func storagePolicyUsageCRSync(ctx context.Context, metadataSyncer *metadataSyncI
 		spuKey := strings.Join([]string{storagePolicyUsage.Spec.StorageClassName,
 			storagePolicyUsage.Spec.StoragePolicyId, storagePolicyUsage.Namespace}, "-")
 		if usedQty, ok := spuAggregatedSumMap[spuKey]; ok {
-			log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %q "+
-				"in namespace: %q Total AggregatedSnapshotSize Sum In MB is: %v", storagePolicyUsage.Name,
-				storagePolicyUsage.Namespace, usedQty.Value())
+			log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %s "+
+				"in namespace: %s Total AggregatedSnapshotSize Sum is: %s", storagePolicyUsage.Name,
+				storagePolicyUsage.Namespace, usedQty.String())
 			totalUsedQty = usedQty
 			updateSpu = true
 		}
@@ -3305,26 +3330,26 @@ func storagePolicyUsageCRSync(ctx context.Context, metadataSyncer *metadataSyncI
 			// Compare the expected total used capacity vs the actual used capacity value in storagePolicyUsage CR
 			patchedStoragePolicyUsage.Status.ResourceTypeLevelQuotaUsage.Used = totalUsedQty
 			currentUsedCapacity := storagePolicyUsage.Status.ResourceTypeLevelQuotaUsage.Used
-			if !reflect.DeepEqual(currentUsedCapacity.Value(), totalUsedQty.Value()) {
-				log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %q in namespace: %q "+
-					"is not matching with the total capacity of all the k8s volumes in Bound state. Current: %v . "+
-					"Expected: %v", storagePolicyUsage.Name, storagePolicyUsage.Namespace,
-					currentUsedCapacity.Value(), totalUsedQty.Value())
+			if currentUsedCapacity.Value() != totalUsedQty.Value() {
+				log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %s in namespace: %s "+
+					"is not matching with the total capacity of all the k8s volumes in Bound state. Current: %s, "+
+					"Expected: %s", storagePolicyUsage.Name, storagePolicyUsage.Namespace,
+					currentUsedCapacity.String(), totalUsedQty.String())
 				err := PatchStoragePolicyUsage(ctx, cnsOperatorClient, &storagePolicyUsage,
 					&patchedStoragePolicyUsage)
 				if err != nil {
-					log.Errorf("storagePolicyUsageCRSync: Patching operation failed for StoragePolicyUsage CR: %q in "+
-						"namespace: %q. err: %v", storagePolicyUsage.Name, patchedStoragePolicyUsage.Namespace, err)
-					return
+					log.Errorf("storagePolicyUsageCRSync: Patching operation failed for StoragePolicyUsage CR: %s in "+
+						"namespace: %s. err: %v. Continuing..", storagePolicyUsage.Name, patchedStoragePolicyUsage.Namespace, err)
+					continue
 				}
-				log.Infof("storagePolicyUsageCRSync: Successfully updated the used field from %v to %v for StoragepolicyUsage "+
-					"CR: %q in namespace: %q", currentUsedCapacity.Value(),
-					totalUsedQty.Value(), patchedStoragePolicyUsage.Name, patchedStoragePolicyUsage.Namespace)
+				log.Infof("storagePolicyUsageCRSync: Successfully updated the used field from %s to %s for StoragepolicyUsage "+
+					"CR: %s in namespace: %s", currentUsedCapacity.String(),
+					totalUsedQty.String(), patchedStoragePolicyUsage.Name, patchedStoragePolicyUsage.Namespace)
 			} else {
-				log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %q in namespace: %q "+
-					"field is matching with the total capacity. Used: %v Skipping the Patch operation",
+				log.Infof("storagePolicyUsageCRSync: The used capacity field for StoragepolicyUsage CR: %s in namespace: %s "+
+					"is matching with the total capacity. Used: %s. Skipping the Patch operation",
 					storagePolicyUsage.Name, storagePolicyUsage.Namespace,
-					storagePolicyUsage.Status.ResourceTypeLevelQuotaUsage.Used.Value())
+					storagePolicyUsage.Status.ResourceTypeLevelQuotaUsage.Used.String())
 			}
 		} else {
 			patchedStoragePolicyUsage.Status = storagepolicyv1alpha2.StoragePolicyUsageStatus{
@@ -3335,12 +3360,12 @@ func storagePolicyUsageCRSync(ctx context.Context, metadataSyncer *metadataSyncI
 			err := PatchStoragePolicyUsage(ctx, cnsOperatorClient, &storagePolicyUsage,
 				&patchedStoragePolicyUsage)
 			if err != nil {
-				log.Errorf("storagePolicyUsageCRSync: Patching operation failed for StoragePolicyUsage CR: %q in "+
-					"namespace: %q. err: %v", storagePolicyUsage.Name, patchedStoragePolicyUsage.Namespace, err)
-				return
+				log.Errorf("storagePolicyUsageCRSync: Patching operation failed for StoragePolicyUsage CR: %s in "+
Continuing..", storagePolicyUsage.Name, patchedStoragePolicyUsage.Namespace, err) + continue } - log.Infof("storagePolicyUsageCRSync: Successfully updated the used field to %v for StoragepolicyUsage "+ - "CR: %q in namespace: %q", totalUsedQty.Value(), patchedStoragePolicyUsage.Name, + log.Infof("storagePolicyUsageCRSync: Successfully updated the used field to %s for StoragepolicyUsage "+ + "CR: %s in namespace: %s", totalUsedQty.String(), patchedStoragePolicyUsage.Name, patchedStoragePolicyUsage.Namespace) } } @@ -3490,10 +3515,10 @@ func cnsVolumeInfoCRUpdated(oldObj interface{}, newObj interface{}, metadataSync return } if diffSnapshotSize.Value() > 0 { - log.Infof("cnsVolumeInfoCRUpdated: aggregated snapshot size increased by %v, increased Used "+ - "field for storagepolicyusage CR: %q", diffSnapshotSize, patchedStoragePolicyUsageCR.Name) + log.Infof("cnsVolumeInfoCRUpdated: aggregated snapshot size increased by %s, increased Used "+ + "field for storagepolicyusage CR: %s", diffSnapshotSize.String(), patchedStoragePolicyUsageCR.Name) } else if diffSnapshotSize.Value() < 0 { - log.Infof("cnsVolumeInfoCRUpdated: aggregated snapshot size decreased by %v, decreased Used "+ - "field for storagepolicyusage CR: %q", diffSnapshotSize, patchedStoragePolicyUsageCR.Name) + log.Infof("cnsVolumeInfoCRUpdated: aggregated snapshot size decreased by %s, decreased Used "+ + "field for storagepolicyusage CR: %s", diffSnapshotSize.String(), patchedStoragePolicyUsageCR.Name) } } diff --git a/pkg/syncer/storagepool/intended_state.go b/pkg/syncer/storagepool/intended_state.go index 7c7759f4b1..f5e7931bc3 100644 --- a/pkg/syncer/storagepool/intended_state.go +++ b/pkg/syncer/storagepool/intended_state.go @@ -217,7 +217,7 @@ func isRemoteVsan(ctx context.Context, dsprops *dsProps, return false, nil } - log.Infof("vSAN Datastore %s is remote to this cluster", dsprops.dsName) + log.Debugf("vSAN Datastore %s is remote to this cluster", dsprops.dsName) return true, nil } @@ -228,7 +228,7 @@ func newIntendedVsanSNAState(ctx context.Context, scWatchCntlr *StorageClassWatc nodes := make([]string, 0) nodes = append(nodes, node) - log.Infof("creating vsan sna sp %q", node) + log.Debugf("creating vsan sna sp %q", node) compatSC := make([]string, 0) for _, scName := range vsan.compatSC { if scWatchCntlr.isHostLocal(scName) { @@ -356,7 +356,7 @@ func (c *SpController) applyIntendedState(ctx context.Context, state *intendedSt } else { // StoragePool already exists, so Update it. We don't expect // ConflictErrors since updates are synchronized with a lock. 
- log.Infof("Updating StoragePool instance for %s", state.spName) + log.Debugf("Updating StoragePool instance for %s", state.spName) sp := state.updateUnstructuredStoragePool(ctx, sp) newSp, err := spClient.Resource(*spResource).Update(ctx, sp, metav1.UpdateOptions{}) if err != nil { diff --git a/pkg/syncer/storagepool/listener.go b/pkg/syncer/storagepool/listener.go index ec3d5c5efc..c3265635f3 100644 --- a/pkg/syncer/storagepool/listener.go +++ b/pkg/syncer/storagepool/listener.go @@ -122,7 +122,7 @@ func initListener(ctx context.Context, scWatchCntlr *StorageClassWatch, err := property.WaitForUpdatesEx(ctx, p, filter, func(updates []types.ObjectUpdate) bool { ctx := logger.NewContextWithLogger(ctx) log = logger.GetLogger(ctx) - log.Infof("Got %d property collector update(s)", len(updates)) + log.Debugf("Got %d property collector update(s)", len(updates)) reconcileAllScheduled := false for _, update := range updates { propChange := update.ChangeSet diff --git a/pkg/syncer/storagepool/util.go b/pkg/syncer/storagepool/util.go index f4f0c54e4d..6f1ff3fcb1 100644 --- a/pkg/syncer/storagepool/util.go +++ b/pkg/syncer/storagepool/util.go @@ -90,7 +90,7 @@ func getDatastoreProperties(ctx context.Context, d *cnsvsphere.DatastoreInfo) *d freeSpace: resource.NewQuantity(ds.Summary.FreeSpace, resource.DecimalSI), } - log.Infof("Datastore %s properties: %v", d.Info.Name, p) + log.Debugf("Datastore %s properties: %v", d.Info.Name, p) return &p }