diff --git a/tests/e2e/no_hci_mesh_rwx_singlevc_topology.go b/tests/e2e/no_hci_mesh_rwx_singlevc_topology.go
index 60316de095..1c7c2ca8f8 100644
--- a/tests/e2e/no_hci_mesh_rwx_singlevc_topology.go
+++ b/tests/e2e/no_hci_mesh_rwx_singlevc_topology.go
@@ -575,10 +575,6 @@ var _ = ginkgo.Describe("[rwx-nohci-singlevc-positive] RWX-Topology-NoHciMesh-Si
 			labelsMap, pvclaim, nodeSelectorTerms, execRWXCommandPod, nginxImage, true, deploymentList[0], 0)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-		ginkgo.By("Verify volume metadata for deployment pod, pvc and pv")
-		err = waitAndVerifyCnsVolumeMetadata(ctx, pv.Spec.CSI.VolumeHandle, pvclaim, pv, &pods.Items[0])
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})

 	/*
@@ -1137,7 +1133,7 @@ var _ = ginkgo.Describe("[rwx-nohci-singlevc-positive] RWX-Topology-NoHciMesh-Si
 			// allowed topology consider for node selector terms is rack3
 			allowedTopologyForPod = getTopologySelector(topologyAffinityDetails, topologyCategories,
 				topologyLength, leafNode, leafNodeTag2)
-			nodeSelectorTerms, err = getNodeSelectorMapForDeploymentPods(allowedTopologies)
+			nodeSelectorTerms, err = getNodeSelectorMapForDeploymentPods(allowedTopologyForPod)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}

@@ -1153,6 +1149,12 @@ var _ = ginkgo.Describe("[rwx-nohci-singlevc-positive] RWX-Topology-NoHciMesh-Si

 		//taking pvclaims 0th index because we are creating only single RWX PVC in this case
 		pvclaim := pvclaims[0]
+		defer func() {
+			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()

 		ginkgo.By("Create 3 standalone Pods using the same PVC")
 		podList, err := createStandalonePodsForRWXVolume(client, ctx, namespace, nodeSelectorTerms, pvclaim, false,
@@ -1169,12 +1171,6 @@ var _ = ginkgo.Describe("[rwx-nohci-singlevc-positive] RWX-Topology-NoHciMesh-Si
 		ginkgo.By("Verify PVC Bound state and CNS side verification")
 		pvs, err := checkVolumeStateAndPerformCnsVerification(ctx, client, pvclaims, "", datastoreurl)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		defer func() {
-			err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-			err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		}()

 		//taking pvs 0th index because we are creating only single RWX PVC in this case
 		pv = pvs[0]
@@ -1827,7 +1823,7 @@ var _ = ginkgo.Describe("[rwx-nohci-singlevc-positive] RWX-Topology-NoHciMesh-Si
 			//rack-2 or zone-2 or cluster-2
 			allowedTopologyForPod = getTopologySelector(topologyAffinityDetails, topologyCategories,
 				topologyLength, leafNode, leafNodeTag1)
-			nodeSelectorTerms, err = getNodeSelectorMapForDeploymentPods(allowedTopologies)
+			nodeSelectorTerms, err = getNodeSelectorMapForDeploymentPods(allowedTopologyForPod)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		}
diff --git a/tests/e2e/topology_aware_node_poweroff.go b/tests/e2e/topology_aware_node_poweroff.go
index 0baca08405..3013b62e24 100644
--- a/tests/e2e/topology_aware_node_poweroff.go
+++ b/tests/e2e/topology_aware_node_poweroff.go
@@ -59,6 +59,25 @@ var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With
 		if !(len(nodeList.Items) > 0) {
 			framework.Failf("Unable to find ready and schedulable Node")
 		}
+
+		// delete nginx-sc storage class in case any stale entry is left behind
+		sc, err := client.StorageV1().StorageClasses().Get(ctx, defaultNginxStorageClassName, metav1.GetOptions{})
+		if err == nil && sc != nil {
+			gomega.Expect(client.StorageV1().StorageClasses().Delete(ctx, sc.Name,
+				*metav1.NewDeleteOptions(0))).NotTo(gomega.HaveOccurred())
+		}
+
+		framework.Logf("Delete service %s in case a stale entry is left behind", servicename)
+		serviceList, err := client.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		if len(serviceList.Items) != 0 {
+			for _, service := range serviceList.Items {
+				if service.Name == servicename {
+					err = client.CoreV1().Services(namespace).Delete(ctx, servicename, *metav1.NewDeleteOptions(0))
+					gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				}
+			}
+		}
 	})

 	// 1. Create a Storage Class with spec containing valid region and zone in
diff --git a/tests/e2e/util.go b/tests/e2e/util.go
index 7caeec550b..7fd530a229 100644
--- a/tests/e2e/util.go
+++ b/tests/e2e/util.go
@@ -986,7 +986,13 @@ func createStatefulSetWithOneReplica(client clientset.Interface, manifestPath st
 	service, err := manifest.SvcFromManifest(mkpath("service.yaml"))
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	service, err = client.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	if err != nil {
+		if strings.Contains(err.Error(), "already exists") {
+			framework.Logf("service 'nginx' already exists")
+		} else {
+			framework.Failf("failed to create nginx service: %v", err)
+		}
+	}
 	*statefulSet.Spec.Replicas = 1
 	_, err = client.AppsV1().StatefulSets(namespace).Create(ctx, statefulSet, metav1.CreateOptions{})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
@@ -2274,8 +2280,8 @@ func verifyPodLocation(pod *v1.Pod, nodeList *v1.NodeList, zoneValue string, reg
 func getTopologyFromPod(pod *v1.Pod, nodeList *v1.NodeList) (string, string, error) {
 	for _, node := range nodeList.Items {
 		if pod.Spec.NodeName == node.Name {
-			podRegion := node.Labels[v1.LabelZoneRegion]
-			podZone := node.Labels[v1.LabelZoneFailureDomain]
+			podRegion := node.Labels[regionKey]
+			podZone := node.Labels[zoneKey]
 			return podRegion, podZone, nil
 		}
 	}
diff --git a/tests/e2e/vsphere.go b/tests/e2e/vsphere.go
index 61a4240179..6e16b22d4b 100644
--- a/tests/e2e/vsphere.go
+++ b/tests/e2e/vsphere.go
@@ -1179,11 +1179,6 @@ func (vs *vSphere) verifyPreferredDatastoreMatch(volumeID string, dsUrls []strin
 	for _, dsUrl := range dsUrls {
 		if actualDatastoreUrl == dsUrl {
 			flag = true
-			if rwxAccessMode {
-				if !strings.HasPrefix(dsUrl, "ds:///vmfs/volumes/vsan:") {
-					return false
-				}
-			}
 			return flag
 		}
 	}
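
A note on the relocated defer in no_hci_mesh_rwx_singlevc_topology.go: the cleanup closure is now registered before `pv = pvs[0]` runs, yet it still sees the correct volume handle, because a deferred closure reads its captured variables when it executes, not when it is registered. A minimal, self-contained sketch of that Go behavior (the names here are illustrative, not taken from the test):

```go
package main

import "fmt"

func main() {
	var pv string

	// Registered before pv is assigned; the closure captures the
	// variable itself, so it observes the later assignment.
	defer func() { fmt.Println("cleanup sees:", pv) }()

	pv = "pvc-123" // assigned after the defer statement
	// Prints "cleanup sees: pvc-123" when main returns.
}
```

The only caveat is that the cleanup runs with whatever value the variable holds at that moment, including its zero value if an earlier step failed before the assignment.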
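
On the createStatefulSetWithOneReplica change in util.go: matching the error message with `strings.Contains` works, but client-go surfaces typed status errors, and apimachinery can classify them directly. A sketch of that alternative, assuming the standard `k8s.io/apimachinery/pkg/api/errors` package (the helper name `createServiceIfMissing` is hypothetical, not part of this patch):

```go
package e2e

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createServiceIfMissing creates svc; if it already exists, it fetches and
// reuses the live object instead of string-matching the error message.
func createServiceIfMissing(ctx context.Context, client clientset.Interface,
	namespace string, svc *v1.Service) (*v1.Service, error) {
	created, err := client.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		// A previous run left the service behind; reuse it.
		return client.CoreV1().Services(namespace).Get(ctx, svc.Name, metav1.GetOptions{})
	}
	return created, err
}
```

This also avoids masking unrelated failures whose message happens to contain "already exists".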