
Commit

Merge pull request kubernetes#121067 from carlory/cleanup-e2enode-framework-equal

e2e_node: stop using deprecated framework.ExpectEqual
k8s-ci-robot authored Oct 10, 2023
2 parents 38c6bd8 + d5d7fb5 commit 09edfe4
Showing 17 changed files with 91 additions and 73 deletions.
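The change is mechanical across the touched files: equality assertions move to gomega.Expect(...).To(gomega.Equal(...)), length checks become gomega.HaveLen, and plain boolean checks become explicit if statements that call framework.Failf. A minimal sketch of the three patterns follows (a hypothetical helper, not a file from this commit; it assumes only the gomega and framework imports shown):

package e2enode

import (
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// exampleAssertions illustrates the substitutions applied throughout the diff.
func exampleAssertions(items []string, isReady bool, got, want string) {
	// Was: framework.ExpectEqual(got, want, "values should match")
	gomega.Expect(got).To(gomega.Equal(want), "values should match")

	// Was: framework.ExpectEqual(len(items), 1)
	gomega.Expect(items).To(gomega.HaveLen(1))

	// Was: framework.ExpectEqual(isReady, true, "pod should be ready")
	if !isReady {
		framework.Failf("pod should be ready")
	}
}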
6 changes: 3 additions & 3 deletions test/e2e_node/apparmor_test.go
@@ -229,10 +229,10 @@ func createPodWithAppArmor(ctx context.Context, f *framework.Framework, profile

func expectSoftRejection(status v1.PodStatus) {
args := []interface{}{"PodStatus: %+v", status}
framework.ExpectEqual(status.Phase, v1.PodPending, args...)
framework.ExpectEqual(status.Reason, "AppArmor", args...)
gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
framework.ExpectEqual(status.ContainerStatuses[0].State.Waiting.Reason, "Blocked", args...)
gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
}

func isAppArmorEnabled() bool {
12 changes: 6 additions & 6 deletions test/e2e_node/checkpoint_container.go
@@ -103,11 +103,9 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
framework.ExpectEqual(
isReady,
true,
"pod should be ready",
)
if !isReady {
framework.Failf("pod %q should be ready", p.Name)
}

framework.Logf(
"About to checkpoint container %q on %q",
@@ -199,7 +197,9 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu
}
}
for fileName := range checkForFiles {
framework.ExpectEqual(checkForFiles[fileName], true)
if !checkForFiles[fileName] {
framework.Failf("File %q not found in checkpoint archive %q", fileName, item)
}
}
// cleanup checkpoint archive
os.RemoveAll(item)
2 changes: 1 addition & 1 deletion test/e2e_node/container_log_rotation_test.go
@@ -80,7 +80,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {

ginkgo.By("get container log path")
framework.ExpectEqual(len(logRotationPod.Status.ContainerStatuses), 1, "log rotation pod should have one container")
gomega.Expect(logRotationPod.Status.ContainerStatuses).To(gomega.HaveLen(1), "log rotation pod should have one container")
id := kubecontainer.ParseContainerID(logRotationPod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
framework.ExpectNoError(err, "should connect to CRI and obtain runtime service clients and image service client")
2 changes: 1 addition & 1 deletion test/e2e_node/cpu_manager_test.go
@@ -241,7 +241,7 @@ func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
cpus, err := cpuset.Parse(strings.TrimSpace(logs))
framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)

framework.ExpectEqual(cpus.Size(), cpuCount, "expected cpu set size == %d, got %q", cpuCount, cpus.String())
gomega.Expect(cpus.Size()).To(gomega.Equal(cpuCount), "expected cpu set size == %d, got %q", cpuCount, cpus.String())
}

ginkgo.By("by deleting the pods and waiting for container removal")
19 changes: 12 additions & 7 deletions test/e2e_node/critical_pod_test.go
@@ -33,6 +33,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)

const (
@@ -82,9 +83,9 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
if p.Name == nonCriticalBestEffort.Name {
framework.ExpectEqual(p.Status.Phase, v1.PodRunning, fmt.Sprintf("pod: %v should not be preempted with status: %#v", p.Name, p.Status))
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod: %v should not be preempted with status: %#v", p.Name, p.Status)
} else {
framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
}
}
})
@@ -125,7 +126,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
}
@@ -149,7 +150,7 @@ func getNodeCPUAndMemoryCapacity(ctx context.Context, f *framework.Framework) v1
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// Assuming that there is only one node, because this is a node e2e test.
framework.ExpectEqual(len(nodeList.Items), 1)
gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
capacity := nodeList.Items[0].Status.Allocatable
return v1.ResourceList{
v1.ResourceCPU: capacity[v1.ResourceCPU],
@@ -161,7 +162,7 @@ func getNodeName(ctx context.Context, f *framework.Framework) string {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// Assuming that there is only one node, because this is a node e2e test.
framework.ExpectEqual(len(nodeList.Items), 1)
gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
return nodeList.Items[0].GetName()
}

@@ -190,9 +191,13 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements, n
}
pod.Spec.PriorityClassName = scheduling.SystemNodeCritical

framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
if !kubelettypes.IsCriticalPod(pod) {
framework.Failf("pod %q should be a critical pod", pod.Name)
}
} else {
framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
if kubelettypes.IsCriticalPod(pod) {
framework.Failf("pod %q should not be a critical pod", pod.Name)
}
}
return pod
}
36 changes: 18 additions & 18 deletions test/e2e_node/device_plugin_test.go
@@ -197,8 +197,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
framework.Logf("len(v1alphaPodResources.PodResources):%+v", len(v1alphaPodResources.PodResources))
framework.Logf("len(v1PodResources.PodResources):%+v", len(v1PodResources.PodResources))

framework.ExpectEqual(len(v1alphaPodResources.PodResources), 2)
framework.ExpectEqual(len(v1PodResources.PodResources), 2)
gomega.Expect(v1alphaPodResources.PodResources).To(gomega.HaveLen(2))
gomega.Expect(v1PodResources.PodResources).To(gomega.HaveLen(2))

var v1alphaResourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
for _, res := range v1alphaPodResources.GetPodResources() {
@@ -217,26 +217,26 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Expect(v1alphaResourcesForOurPod).NotTo(gomega.BeNil())
gomega.Expect(v1ResourcesForOurPod).NotTo(gomega.BeNil())

framework.ExpectEqual(v1alphaResourcesForOurPod.Name, pod1.Name)
framework.ExpectEqual(v1ResourcesForOurPod.Name, pod1.Name)
gomega.Expect(v1alphaResourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
gomega.Expect(v1ResourcesForOurPod.Name).To(gomega.Equal(pod1.Name))

framework.ExpectEqual(v1alphaResourcesForOurPod.Namespace, pod1.Namespace)
framework.ExpectEqual(v1ResourcesForOurPod.Namespace, pod1.Namespace)
gomega.Expect(v1alphaResourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
gomega.Expect(v1ResourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))

framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers), 1)
framework.ExpectEqual(len(v1ResourcesForOurPod.Containers), 1)
gomega.Expect(v1alphaResourcesForOurPod.Containers).To(gomega.HaveLen(1))
gomega.Expect(v1ResourcesForOurPod.Containers).To(gomega.HaveLen(1))

framework.ExpectEqual(v1alphaResourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
framework.ExpectEqual(v1ResourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))

framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers[0].Devices), 1)
framework.ExpectEqual(len(v1ResourcesForOurPod.Containers[0].Devices), 1)
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))

framework.ExpectEqual(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName, SampleDeviceResourceName)
framework.ExpectEqual(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName, SampleDeviceResourceName)
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))

framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
framework.ExpectEqual(len(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
})

// simulate container restart, while all other involved components (kubelet, device plugin) stay stable. To do so, in the container
Expand All @@ -260,7 +260,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Confirming that after a container restart, fake-device assignment is kept")
devIDRestart1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
framework.ExpectEqual(devIDRestart1, devID1)
gomega.Expect(devIDRestart1).To(gomega.Equal(devID1))

// crosscheck from the device assignment is preserved and stable from perspective of the kubelet.
// needs to match the container perspective.
@@ -373,7 +373,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Confirming that after a container restart, fake-device assignment is kept")
devIDRestart1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
framework.ExpectEqual(devIDRestart1, devID1)
gomega.Expect(devIDRestart1).To(gomega.Equal(devID1))

ginkgo.By("Restarting Kubelet")
restartKubelet(true)
44 changes: 28 additions & 16 deletions test/e2e_node/eviction_test.go
@@ -368,7 +368,9 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.ExpectNoError(err, "failed to create priority class")
}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -426,7 +428,9 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.ExpectNoError(err, "failed to create priority class")
}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -480,7 +484,9 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
if err != nil && !apierrors.IsAlreadyExists(err) {
framework.ExpectNoError(err, "failed to create priority class")
}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -731,7 +737,7 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
}

if priorityPod.Status.Phase == v1.PodFailed {
framework.ExpectEqual(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
gomega.Expect(priorityPod.Status.Reason).To(gomega.Equal(eviction.Reason), "pod %s failed; expected Status.Reason to be %s, but got %s",
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
}

@@ -779,41 +785,47 @@ func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs
}.AsSelector().String()
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{FieldSelector: selector})
framework.ExpectNoError(err, "getting events")
framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
gomega.Expect(podEvictEvents.Items).To(gomega.HaveLen(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
event := podEvictEvents.Items[0]

if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
if !found {
framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
}
starvedResource := v1.ResourceName(starved)
framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
pod.Name, expectedStarvedResource, starvedResource)

// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
if !found {
framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
}
offendingContainers := strings.Split(offendersString, ",")
framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
gomega.Expect(offendingContainers).To(gomega.HaveLen(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
eviction.OffendingContainersKey)
framework.ExpectEqual(offendingContainers[0], pod.Spec.Containers[0].Name, "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
gomega.Expect(offendingContainers[0]).To(gomega.Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])

// Check the eviction.OffendingContainersUsageKey
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
pod.Name)
if !found {
framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
pod.Name)
}
offendingContainersUsage := strings.Split(offendingUsageString, ",")
framework.ExpectEqual(len(offendingContainersUsage), 1, "Expected to find the offending container's usage in the %s annotation, but found %+v",
gomega.Expect(offendingContainersUsage).To(gomega.HaveLen(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
eviction.OffendingContainersUsageKey, offendingContainersUsage)
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
framework.ExpectNoError(err, "parsing pod %s's %s annotation as a quantity", pod.Name, eviction.OffendingContainersUsageKey)
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
framework.ExpectEqual(usageQuantity.Cmp(request), 1, "Expected usage of offending container: %s in pod %s to exceed its request %s",
gomega.Expect(usageQuantity.Cmp(request)).To(gomega.Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
usageQuantity.String(), pod.Name, request.String())
}
}
4 changes: 1 addition & 3 deletions test/e2e_node/hugepages_test.go
@@ -214,9 +214,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
framework.ExpectNoError(err, "while getting node status")

ginkgo.By("Verifying that the node now supports huge pages with size 3Mi")
value, ok := node.Status.Capacity["hugepages-3Mi"]
framework.ExpectEqual(ok, true, "capacity should contain resource hugepages-3Mi")
framework.ExpectEqual(value.String(), "9Mi", "huge pages with size 3Mi should be supported")
gomega.Expect(node.Status.Capacity).To(gomega.HaveKeyWithValue("hugepages-3Mi", resource.MustParse("9Mi")), "capacity should contain resource hugepages-3Mi and huge pages with size 3Mi should be supported")

ginkgo.By("restarting the node and verifying that huge pages with size 3Mi are not supported")
restartKubelet(true)
3 changes: 2 additions & 1 deletion test/e2e_node/kubeletconfig/kubeletconfig.go
@@ -29,6 +29,7 @@ import (
"strconv"
"time"

"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/pkg/cluster/ports"
@@ -139,7 +140,7 @@ func pollConfigz(ctx context.Context, timeout time.Duration, pollInterval time.D
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
framework.ExpectEqual(len(match), 2)
gomega.Expect(match).To(gomega.HaveLen(2))
port, err := strconv.Atoi(match[1])
framework.ExpectNoError(err)
framework.Logf("http requesting node kubelet /configz")