diff --git a/e2e/create-pvc_test.go b/e2e/create-pvc_test.go
index 9ab42ea7..7c6ec2a7 100644
--- a/e2e/create-pvc_test.go
+++ b/e2e/create-pvc_test.go
@@ -8,26 +8,27 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/apimachinery/pkg/api/errors"
-	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/klog/v2"
-
-	"k8s.io/client-go/tools/clientcmd"
-	cdicli "kubevirt.io/csi-driver/pkg/generated/containerized-data-importer/client-go/clientset/versioned"
-	kubecli "kubevirt.io/csi-driver/pkg/generated/kubevirt/client-go/clientset/versioned"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-
 	"github.com/spf13/pflag"
 	k8sv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/client-go/kubernetes"
+	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+
+	cdicli "kubevirt.io/csi-driver/pkg/generated/containerized-data-importer/client-go/clientset/versioned"
+	kubecli "kubevirt.io/csi-driver/pkg/generated/kubevirt/client-go/clientset/versioned"
 )
 
+const hostNameLabelKey = "kubernetes.io/hostname"
+
 var virtClient *kubecli.Clientset
 
 func defaultInfraClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
@@ -49,8 +50,6 @@ var _ = Describe("CreatePVC", func() {
-	const hostNameLabelKey = "kubernetes.io/hostname"
-
 	var tmpDir string
 	var tenantClient *kubernetes.Clientset
 	var infraClient *kubernetes.Clientset
@@ -85,7 +84,7 @@ var _ = Describe("CreatePVC", func() {
 		_ = os.RemoveAll(tmpDir)
 	})
 
-	DescribeTable("creates a pvc and attaches to pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc func(string) *k8sv1.Pod) {
+	DescribeTable("creates a pvc and attaches to pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, attachCmd string) {
 		pvcName := "test-pvc"
 		storageClassName := "kubevirt"
 		pvc := pvcSpec(pvcName, storageClassName, "10Mi")
@@ -96,16 +95,122 @@ var _ = Describe("CreatePVC", func() {
 		Expect(err).ToNot(HaveOccurred())
 
 		By("creating a pod that attaches pvc")
+		podSpec := createPod("test-pod",
+			withCommand(attachCmd),
+			storageOpt(pvc.Name))
+
 		runPod(
 			tenantClient.CoreV1(),
 			namespace,
-			podCreationFunc(pvc.Name))
+			podSpec,
+			true)
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsAttachCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockAttachCommand),
 	)
 
-	DescribeTable("creates a pvc, attaches to pod, re-attach to another pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc func(string) *k8sv1.Pod) {
+	It("should create a RW-Many block pvc and attach it to a pod", Label("pvcCreation", "RWX", "Block"), func() {
+		pvcName := "test-pvc"
+		storageClassName := "kubevirt"
+		pvc := pvcSpec(pvcName, storageClassName, "10Mi")
+		volumeMode := k8sv1.PersistentVolumeBlock
+		pvc.Spec.VolumeMode = &volumeMode
+		pvc.Spec.AccessModes = []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteMany}
+
+		By("creating a pvc")
+		_, err := tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
+		Expect(err).ToNot(HaveOccurred())
+
+		By("creating a pod that attaches pvc")
+		const (
+			labelKey         = "app"
+			writerLabelValue = "writer"
+		)
+
+		writerPod := runPod(
+			tenantClient.CoreV1(),
+			namespace,
+			createPod("writer-pod",
+				withBlock(pvc.Name),
+				withCommand(blockWriteCommand+" && sleep 60"),
+				withLabel(labelKey, writerLabelValue),
+			),
+			false,
+		)
+
+		GinkgoWriter.Printf("[DEBUG] writer pod node: %s\n", writerPod.Spec.NodeName)
+
+		By("creating a different pod that reads from pvc")
+		Eventually(func(g Gomega) {
+			readerPod := runPod(
+				tenantClient.CoreV1(),
+				namespace,
+				createPod("reader-pod",
+					withCommand(blockReadCommand),
+					withBlock(pvc.Name),
+					withPodAntiAffinity(labelKey, writerLabelValue),
+				),
+				true,
+			)
+
+			defer deletePod(tenantClient.CoreV1(), namespace, readerPod.Name)
+			GinkgoWriter.Printf("[DEBUG] reader pod node: %s\n", readerPod.Spec.NodeName)
+
+			s := tenantClient.CoreV1().Pods(namespace).GetLogs(readerPod.Name, &k8sv1.PodLogOptions{})
+			reader, err := s.Stream(context.Background())
+			g.Expect(err).ToNot(HaveOccurred())
+			defer reader.Close()
+			buf := new(bytes.Buffer)
+			n, err := buf.ReadFrom(reader)
+			g.Expect(err).ToNot(HaveOccurred())
+
+			g.Expect(n).To(BeEquivalentTo(len("testing\n")))
+			out := buf.String()
+			g.Expect(strings.TrimSpace(out)).To(Equal("testing"))
+		}).WithTimeout(120 * time.Second).WithPolling(20 * time.Second).Should(Succeed())
+
+		deletePod(tenantClient.CoreV1(), namespace, writerPod.Name)
+	})
+
+	It("should reject a RW-Many file-system pvc attached to a pod", Label("pvcCreation", "RWX", "FS"), func() {
+		const pvcName = "test-pvc"
+		storageClassName := "kubevirt"
+		pvc := pvcSpec(pvcName, storageClassName, "10Mi")
+		volumeMode := k8sv1.PersistentVolumeFilesystem
+		pvc.Spec.VolumeMode = &volumeMode
+		pvc.Spec.AccessModes = []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteMany}
+
+		By("creating a pvc")
+		_, err := tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
+		Expect(err).ToNot(HaveOccurred())
+
+		By("creating a pod that attaches pvc")
+		runPodAndExpectPending(
+			tenantClient.CoreV1(),
+			namespace,
+			createPod("test-pod", withFileSystem(pvc.Name), withCommand(fsAttachCommand)))
+
+		Eventually(func(g Gomega) bool {
+			// Ensure the PVC event log reports that a non-block RWX volume was rejected.
+			events, err := tenantClient.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.kind=PersistentVolumeClaim", pvcName)})
+			g.Expect(err).ToNot(HaveOccurred())
+
+			foundError := false
+			GinkgoWriter.Println("PVC Events:")
+			for _, evt := range events.Items {
+				GinkgoWriter.Println(evt.Message)
+				if strings.Contains(evt.Message, "non-block volume with RWX access mode is not supported") {
+					foundError = true
+				}
+			}
+
+			return foundError
+		}).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(BeTrue())
+	})
+
+	DescribeTable("creates a pvc, attaches to pod, re-attach to another pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, attachCmd string) {
 		nodes, err := tenantClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
 		Expect(err).ToNot(HaveOccurred())
 		// select at least two node names
@@ -123,23 +228,25 @@ var _ = Describe("CreatePVC", func() {
 		_, err = tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
 		Expect(err).ToNot(HaveOccurred())
 
-		podSpec := podCreationFunc(pvc.Name)
-		podSpec.Spec.NodeSelector = map[string]string{hostNameLabelKey: host1}
+		podSpec := createPod("test-pod",
+			storageOpt(pvc.Name),
+			withCommand(attachCmd),
+			withNodeSelector(hostNameLabelKey, host1))
 
 		By(fmt.Sprintf("creating a pod that attaches pvc on node %s", host1))
-		pod := runPod(tenantClient.CoreV1(), namespace, podSpec)
+		pod := runPod(tenantClient.CoreV1(), namespace, podSpec, true)
 		deletePod(tenantClient.CoreV1(), namespace, pod.Name)
 
 		pod.Spec.NodeSelector = map[string]string{hostNameLabelKey: host2}
 		By(fmt.Sprintf("creating a pod that attaches pvc on node %s", host2))
-		anotherPod := runPod(tenantClient.CoreV1(), namespace, podSpec)
+		anotherPod := runPod(tenantClient.CoreV1(), namespace, podSpec, true)
 		deletePod(tenantClient.CoreV1(), namespace, anotherPod.Name)
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsAttachCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockAttachCommand),
 	)
 
-	DescribeTable("verify persistence - creates a pvc, attaches to writer pod, re-attach to a reader pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podWriterFunc func(string) *k8sv1.Pod, podReaderFunc func(string) *k8sv1.Pod) {
+	DescribeTable("verify persistence - creates a pvc, attaches to writer pod, re-attach to a reader pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, writeCmd, readCmd string) {
 		By("creating a pvc")
 		pvc := pvcSpec("test-pvc", "kubevirt", "10Mi")
 		pvc.Spec.VolumeMode = &volumeMode
@@ -147,11 +254,18 @@ var _ = Describe("CreatePVC", func() {
 		Expect(err).ToNot(HaveOccurred())
 
 		By("creating a pod that writes to pvc on node")
-		writerPod := runPod(tenantClient.CoreV1(), namespace, podWriterFunc(pvc.Name))
+		wPod := createPod("writer-pod",
+			withCommand(writeCmd),
+			storageOpt(pvc.Name),
+		)
+		writerPod := runPod(tenantClient.CoreV1(), namespace, wPod, true)
 		deletePod(tenantClient.CoreV1(), namespace, writerPod.Name)
 
 		By("creating a different pod that reads from pvc")
-		readerPod := runPod(tenantClient.CoreV1(), namespace, podReaderFunc(pvc.Name))
+		rPod := createPod("reader-pod",
+			withCommand(readCmd),
+			storageOpt(pvc.Name))
+		readerPod := runPod(tenantClient.CoreV1(), namespace, rPod, true)
 		s := tenantClient.CoreV1().Pods(namespace).GetLogs(readerPod.Name, &k8sv1.PodLogOptions{})
 		reader, err := s.Stream(context.Background())
 		Expect(err).ToNot(HaveOccurred())
@@ -165,11 +279,11 @@ var _ = Describe("CreatePVC", func() {
 		Expect(strings.TrimSpace(out)).To(Equal("testing"))
 		deletePod(tenantClient.CoreV1(), namespace, readerPod.Name)
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, writerPodFs, readerPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, writerPodBlock, readerPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsWriteCommand, fsReadCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockWriteCommand, blockReadCommand),
 	)
 
-	DescribeTable("multi attach - creates 3 pvcs, attach all 3 to pod, detach all 3 from the pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc func(string) *k8sv1.Pod) {
+	DescribeTable("multi attach - creates 3 pvcs, attach all 3 to pod, detach all 3 from the pod", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, attachCmd string) {
 		By("creating a pvc")
 		pvc1 := pvcSpec("test-pvc1", "kubevirt", "10Mi")
 		pvc1.Spec.VolumeMode = &volumeMode
@@ -185,18 +299,20 @@ var _ = Describe("CreatePVC", func() {
 		Expect(err).ToNot(HaveOccurred())
 
 		By("creating a pod that uses 3 PVCs")
-		podSpec := podCreationFunc(pvc1.Name)
-		addPvc(podSpec, pvc2.Name, "/pv2")
-		addPvc(podSpec, pvc3.Name, "/pv3")
+		podSpec := createPod("test-pod",
+			withCommand(attachCmd),
+			storageOpt(pvc1.Name),
+			withPVC(pvc2.Name, "/pv2"),
+			withPVC(pvc3.Name, "/pv3"))
 
-		pod := runPod(tenantClient.CoreV1(), namespace, podSpec)
+		pod := runPod(tenantClient.CoreV1(), namespace, podSpec, true)
 		deletePod(tenantClient.CoreV1(), namespace, pod.Name)
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsAttachCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockAttachCommand),
 	)
 
-	DescribeTable("multi attach - create multiple pods pvcs on same node, and each pod should connect to a different PVC", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc func(string) *k8sv1.Pod) {
+	DescribeTable("multi attach - create multiple pods with pvcs on the same node, and each pod should connect to a different PVC", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, attachCmd string) {
 		nodes, err := tenantClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
 		Expect(err).ToNot(HaveOccurred())
 		host := nodes.Items[0].Labels[hostNameLabelKey]
@@ -215,11 +331,13 @@ var _ = Describe("CreatePVC", func() {
 
 		podList := make([]*k8sv1.Pod, 0)
 		for _, pvc := range pvcList {
-			podSpec := podCreationFunc(pvc.Name)
-			podSpec.Spec.NodeSelector = map[string]string{hostNameLabelKey: host}
+			podSpec := createPod("test-pod",
+				storageOpt(pvc.Name),
+				withCommand(attachCmd),
+				withNodeSelector(hostNameLabelKey, host))
 
 			By(fmt.Sprintf("creating a pod that attaches pvc on node %s", host))
-			pod := runPod(tenantClient.CoreV1(), namespace, podSpec)
+			pod := runPod(tenantClient.CoreV1(), namespace, podSpec, true)
 			podList = append(podList, pod)
 		}
 		Eventually(func() bool {
@@ -240,11 +358,11 @@ var _ = Describe("CreatePVC", func() {
 			deletePod(tenantClient.CoreV1(), namespace, pod.Name)
 		}
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsAttachCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockAttachCommand),
 	)
 
-	DescribeTable("Verify infra cluster cleanup", Label("pvc cleanup"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc func(string) *k8sv1.Pod) {
+	DescribeTable("Verify infra cluster cleanup", Label("pvc cleanup"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, attachCmd string) {
 		pvcName := "test-pvc"
 		storageClassName := "kubevirt"
 		pvc := pvcSpec(pvcName, storageClassName, "10Mi")
@@ -258,8 +376,11 @@ var _ = Describe("CreatePVC", func() {
 		}, time.Second*30, time.Second).Should(Equal(k8sv1.ClaimBound))
 		volumeName := pvc.Spec.VolumeName
 
-		podSpec := podCreationFunc(pvc.Name)
-		pod := runPod(tenantClient.CoreV1(), namespace, podSpec)
+		podSpec := createPod("test-pod",
+			storageOpt(pvc.Name),
+			withCommand(attachCmd))
+
+		pod := runPod(tenantClient.CoreV1(), namespace, podSpec, true)
 		pod, err = tenantClient.CoreV1().Pods(namespace).Get(context.Background(), pod.Name, metav1.GetOptions{})
 		Expect(err).ToNot(HaveOccurred())
 		Expect(pod.Status.Phase).To(BeElementOf(k8sv1.PodSucceeded, k8sv1.PodRunning))
@@ -283,8 +404,8 @@ var _ = Describe("CreatePVC", func() {
 			return errors.IsNotFound(err)
 		}, 1*time.Minute, 2*time.Second).Should(BeTrue(), "infra pvc should disappear")
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsAttachCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockAttachCommand),
 	)
 
 	Context("Should prevent access to volumes from infra cluster", func() {
@@ -393,7 +514,8 @@ var _ = Describe("CreatePVC", func() {
 			}
 			tenantPVC, err = tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), tenantPVC, metav1.CreateOptions{})
 			Expect(err).ToNot(HaveOccurred())
-			pod := writerPodFs(tenantPVC.Name)
+			pod := createPod("writer-pod", withFileSystem(tenantPVC.Name), withCommand(fsWriteCommand))
+
 			By("Creating pod that attempts to use the specially crafted PVC")
 			pod, err = tenantClient.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
 			Expect(err).ToNot(HaveOccurred())
@@ -418,174 +540,6 @@ var _ = Describe("CreatePVC", func() {
 	})
 })
 
-func writerPodFs(volumeName string) *k8sv1.Pod {
-	return podWithFilesystemPvcSpec("writer-pod",
-		volumeName,
-		[]string{"sh"},
-		[]string{"-c", "echo testing > /opt/test.txt"})
-}
-
-func readerPodFs(pvcName string) *k8sv1.Pod {
-	return podWithFilesystemPvcSpec("reader-pod",
-		pvcName,
-		[]string{"sh"},
-		[]string{"-c", "cat /opt/test.txt"})
-}
-
-func writerPodBlock(volumeName string) *k8sv1.Pod {
-	return podWithBlockPvcSpec("writer-pod",
-		volumeName,
-		[]string{"sh"},
-		[]string{"-c", "echo testing > /dev/csi"})
-}
-
-func readerPodBlock(pvcName string) *k8sv1.Pod {
-	return podWithBlockPvcSpec("reader-pod",
-		pvcName,
-		[]string{"sh"},
-		[]string{"-c", "head -c 8 /dev/csi"})
-}
-
-func attacherPodFs(pvcName string) *k8sv1.Pod {
-	return podWithFilesystemPvcSpec("test-pod",
-		pvcName,
-		[]string{"sh"},
-		[]string{"-c", "ls -la /opt && echo kubevirt-csi-driver && mktemp /opt/test-XXXXXX"})
-}
-
-func attacherPodBlock(pvcName string) *k8sv1.Pod {
-	return podWithBlockPvcSpec("test-pod",
-		pvcName,
-		[]string{"sh"},
-		[]string{"-c", "ls -al /dev/csi"})
-}
-
-func podWithoutPVCSpec(podName string, cmd, args []string) *k8sv1.Pod {
-	image := "busybox"
-	return &k8sv1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: podName,
-		},
-		Spec: k8sv1.PodSpec{
-			SecurityContext: &k8sv1.PodSecurityContext{
-				SeccompProfile: &k8sv1.SeccompProfile{
-					Type: k8sv1.SeccompProfileTypeRuntimeDefault,
-				},
-			},
-			RestartPolicy: k8sv1.RestartPolicyNever,
-			Containers: []k8sv1.Container{
-				{
-					SecurityContext: &k8sv1.SecurityContext{
-						Capabilities: &k8sv1.Capabilities{
-							Drop: []k8sv1.Capability{
-								"ALL",
-							},
-						},
-					},
-					Name:    podName,
-					Image:   image,
-					Command: cmd,
-					Args:    args,
-				},
-			},
-			// add toleration so we can use control node for tests
-			Tolerations: []k8sv1.Toleration{
-				{
-					Key:      "node-role.kubernetes.io/master",
-					Operator: k8sv1.TolerationOpExists,
-					Effect:   k8sv1.TaintEffectNoSchedule,
-				},
-				{
-					Key:      "node-role.kubernetes.io/control-plane",
-					Operator: k8sv1.TolerationOpExists,
-					Effect:   k8sv1.TaintEffectNoSchedule,
-				},
-			},
-		},
-	}
-}
-
-func podWithBlockPvcSpec(podName, pvcName string, cmd, args []string) *k8sv1.Pod {
-	podSpec := podWithoutPVCSpec(podName, cmd, args)
-	volumeName := "blockpv"
-	podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, k8sv1.Volume{
-		Name: volumeName,
-		VolumeSource: k8sv1.VolumeSource{
-			PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
-				ClaimName: pvcName,
-			},
-		},
-	})
-	podSpec.Spec.Containers[0].VolumeDevices = []k8sv1.VolumeDevice{
-		{
-			Name:       volumeName,
-			DevicePath: "/dev/csi",
-		},
-	}
-	return podSpec
-}
-
-func podWithFilesystemPvcSpec(podName, pvcName string, cmd, args []string) *k8sv1.Pod {
-	podSpec := podWithoutPVCSpec(podName, cmd, args)
-	volumeName := "fspv"
-	podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, k8sv1.Volume{
-		Name: volumeName,
-		VolumeSource: k8sv1.VolumeSource{
-			PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
-				ClaimName: pvcName,
-			},
-		},
-	})
-	podSpec.Spec.Containers[0].VolumeMounts = []k8sv1.VolumeMount{
-		{
-			Name:      volumeName,
-			MountPath: "/opt",
-		},
-	}
-	return podSpec
-}
-
-func addPvc(podSpec *k8sv1.Pod, pvcName string, mountPath string) *k8sv1.Pod {
-	volumeName := pvcName
-	podSpec.Spec.Volumes = append(
-		podSpec.Spec.Volumes,
-		k8sv1.Volume{
-			Name: volumeName,
-			VolumeSource: k8sv1.VolumeSource{
-				PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
-					ClaimName: pvcName,
-				},
-			},
-		})
-	if len(podSpec.Spec.Containers[0].VolumeMounts) > 0 {
-		podSpec = addVolumeMount(podSpec, volumeName, mountPath)
-	}
-	if len(podSpec.Spec.Containers[0].VolumeDevices) > 0 {
-		podSpec = addVolumeDevice(podSpec, volumeName)
-	}
-	return podSpec
-}
-
-func addVolumeMount(podSpec *k8sv1.Pod, volumeName string, mountPath string) *k8sv1.Pod {
-	podSpec.Spec.Containers[0].VolumeMounts = append(
-		podSpec.Spec.Containers[0].VolumeMounts,
-		k8sv1.VolumeMount{
-			Name:      volumeName,
-			MountPath: mountPath,
-		})
-	return podSpec
-}
-
-func addVolumeDevice(podSpec *k8sv1.Pod, volumeName string) *k8sv1.Pod {
-	podSpec.Spec.Containers[0].VolumeDevices = append(
-		podSpec.Spec.Containers[0].VolumeDevices,
-		k8sv1.VolumeDevice{
-			Name:       volumeName,
-			DevicePath: fmt.Sprintf("/dev/%s", volumeName),
-		})
-	return podSpec
-}
-
 func pvcSpec(pvcName, storageClassName, size string) *k8sv1.PersistentVolumeClaim {
 	quantity, err := resource.ParseQuantity(size)
 	Expect(err).ToNot(HaveOccurred())
@@ -606,23 +560,21 @@ func pvcSpec(pvcName, storageClassName, size string) *k8sv1.PersistentVolumeClai
 	return pvc
 }
 
-func runPod(client v1.CoreV1Interface, namespace string, pod *k8sv1.Pod) *k8sv1.Pod {
+func runPod(client v1.CoreV1Interface, namespace string, pod *k8sv1.Pod, waitComplete bool) *k8sv1.Pod {
 	pod, err := client.Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
 	Expect(err).ToNot(HaveOccurred())
 
+	expectedPhase := k8sv1.PodSucceeded
+	if !waitComplete {
+		expectedPhase = k8sv1.PodRunning
+	}
 	By("Wait for pod to reach a completed phase")
-	Eventually(func() error {
+	Eventually(func(g Gomega) k8sv1.PodPhase {
 		pod, err = client.Pods(namespace).Get(context.Background(), pod.Name, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-		// TODO: change command and wait for completed/succeeded
-		if pod.Status.Phase != k8sv1.PodSucceeded {
-			return fmt.Errorf("Pod in phase %s, expected Succeeded", pod.Status.Phase)
-		}
+		g.Expect(err).ToNot(HaveOccurred())
+		return pod.Status.Phase
+	}, 3*time.Minute, 5*time.Second).Should(Equal(expectedPhase), "Pod should reach the %s phase", expectedPhase)
 
-		return nil
-	}, 3*time.Minute, 5*time.Second).Should(Succeed(), "Pod should reach Succeeded state")
 	//Ensure we don't see couldn't find device by serial id in pod event log.
 	events, err := client.Events(namespace).List(context.Background(), metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s", pod.Name), TypeMeta: metav1.TypeMeta{Kind: "Pod"}})
 	Expect(err).ToNot(HaveOccurred())
@@ -633,6 +585,18 @@ func runPod(client v1.CoreV1Interface, namespace string, pod *k8sv1.Pod) *k8sv1.
 	return pod
 }
 
+func runPodAndExpectPending(client v1.CoreV1Interface, namespace string, pod *k8sv1.Pod) {
+	pod, err := client.Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
+	Expect(err).ToNot(HaveOccurred())
+
+	Consistently(func(g Gomega) k8sv1.PodPhase {
+		pod, err = client.Pods(namespace).Get(context.Background(), pod.Name, metav1.GetOptions{})
+		g.Expect(err).ToNot(HaveOccurred())
+
+		return pod.Status.Phase
+	}).WithTimeout(60*time.Second).WithPolling(5*time.Second).Should(Equal(k8sv1.PodPending), "Pod should stay in the Pending phase")
+}
+
 func deletePod(client v1.CoreV1Interface, ns, podName string) {
 	By("Delete pod")
 	zero := int64(0)
diff --git a/e2e/create_pod_helper_test.go b/e2e/create_pod_helper_test.go
new file mode 100644
index 00000000..1a7416d7
--- /dev/null
+++ b/e2e/create_pod_helper_test.go
@@ -0,0 +1,187 @@
+package e2e_test
+
+import (
+	"fmt"
+
+	k8sv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	fsCommonFile    = "/opt/test.txt"
+	fsWriteCommand  = "echo testing > " + fsCommonFile
+	fsReadCommand   = "cat " + fsCommonFile
+	fsAttachCommand = "ls -la /opt && echo kubevirt-csi-driver && mktemp /opt/test-XXXXXX"
+
+	blockCommonFile    = "/dev/csi"
+	blockWriteCommand  = "echo testing > " + blockCommonFile
+	blockReadCommand   = "head -c 8 " + blockCommonFile
+	blockAttachCommand = "ls -al /dev/csi"
+)
+
+type podOption func(pod *k8sv1.Pod)
+type storageOption func(string) podOption
+
+func createPod(podName string, opts ...podOption) *k8sv1.Pod {
+	pod := &k8sv1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: podName,
+		},
+		Spec: k8sv1.PodSpec{
+			SecurityContext: &k8sv1.PodSecurityContext{
+				SeccompProfile: &k8sv1.SeccompProfile{
+					Type: k8sv1.SeccompProfileTypeRuntimeDefault,
+				},
+			},
+			RestartPolicy: k8sv1.RestartPolicyNever,
+			Containers: []k8sv1.Container{
+				{
+					SecurityContext: &k8sv1.SecurityContext{
+						Capabilities: &k8sv1.Capabilities{
+							Drop: []k8sv1.Capability{
+								"ALL",
+							},
+						},
+					},
+					Name:  podName,
+					Image: "busybox",
+				},
+			},
+			// add toleration so we can use control node for tests
+			Tolerations: []k8sv1.Toleration{
+				{
+					Key:      "node-role.kubernetes.io/master",
+					Operator: k8sv1.TolerationOpExists,
+					Effect:   k8sv1.TaintEffectNoSchedule,
+				},
+				{
+					Key:      "node-role.kubernetes.io/control-plane",
+					Operator: k8sv1.TolerationOpExists,
+					Effect:   k8sv1.TaintEffectNoSchedule,
+				},
+			},
+		},
+	}
+
+	for _, o := range opts {
+		o(pod)
+	}
+
+	return pod
+}
+
+func withCommand(cmd string) podOption {
+	return func(pod *k8sv1.Pod) {
+		pod.Spec.Containers[0].Command = []string{"sh"}
+		pod.Spec.Containers[0].Args = []string{"-c", cmd}
+	}
+}
+
+func withBlock(pvcName string) podOption {
+	const volumeName = "blockpv"
+	return func(pod *k8sv1.Pod) {
+		pod.Spec.Volumes = append(pod.Spec.Volumes, getVolume(volumeName, pvcName))
+		pod.Spec.Containers[0].VolumeDevices = []k8sv1.VolumeDevice{
+			{
+				Name:       volumeName,
+				DevicePath: "/dev/csi",
+			},
+		}
+	}
+}
+
+func withFileSystem(pvcName string) podOption {
+	const volumeName = "fspv"
+	return func(pod *k8sv1.Pod) {
+		pod.Spec.Volumes = append(pod.Spec.Volumes, getVolume(volumeName, pvcName))
+		pod.Spec.Containers[0].VolumeMounts = []k8sv1.VolumeMount{
+			{
+				Name:      volumeName,
+				MountPath: "/opt",
+			},
+		}
+	}
+}
+
+func withNodeSelector(key, value string) podOption {
+	return func(pod *k8sv1.Pod) {
+		if pod.Spec.NodeSelector == nil {
+			pod.Spec.NodeSelector = make(map[string]string)
+		}
+		pod.Spec.NodeSelector[key] = value
+	}
+}
+
+func withLabel(key, value string) podOption {
+	return func(pod *k8sv1.Pod) {
+		if pod.Labels == nil {
+			pod.Labels = make(map[string]string)
+		}
+		pod.Labels[key] = value
+	}
+}
+
+func withPodAntiAffinity(key, value string) podOption {
+	return func(pod *k8sv1.Pod) {
+		pod.Spec.Affinity = &k8sv1.Affinity{
+			PodAntiAffinity: &k8sv1.PodAntiAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: []k8sv1.PodAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      key,
+									Operator: metav1.LabelSelectorOpIn,
+									Values:   []string{value},
+								},
+							},
+						},
+						TopologyKey: hostNameLabelKey,
+					},
+				},
+			},
+		}
+	}
+}
+
+func getVolume(volumeName, pvcName string) k8sv1.Volume {
+	return k8sv1.Volume{
+		Name: volumeName,
+		VolumeSource: k8sv1.VolumeSource{
+			PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
+				ClaimName: pvcName,
+			},
+		},
+	}
+}
+
+func withPVC(pvcName string, mountPath string) podOption {
+	return func(pod *k8sv1.Pod) {
+		pod.Spec.Volumes = append(pod.Spec.Volumes, getVolume(pvcName, pvcName))
+		if len(pod.Spec.Containers[0].VolumeMounts) > 0 {
+			addVolumeMount(pod, pvcName, mountPath)
+		}
+		if len(pod.Spec.Containers[0].VolumeDevices) > 0 {
+			addVolumeDevice(pod, pvcName)
+		}
+	}
+}
+
+func addVolumeMount(podSpec *k8sv1.Pod, volumeName string, mountPath string) {
+	podSpec.Spec.Containers[0].VolumeMounts = append(
+		podSpec.Spec.Containers[0].VolumeMounts,
+		k8sv1.VolumeMount{
+			Name:      volumeName,
+			MountPath: mountPath,
+		})
+}
+
+func addVolumeDevice(podSpec *k8sv1.Pod, volumeName string) {
+	podSpec.Spec.Containers[0].VolumeDevices = append(
+		podSpec.Spec.Containers[0].VolumeDevices,
+		k8sv1.VolumeDevice{
+			Name:       volumeName,
+			DevicePath: fmt.Sprintf("/dev/%s", volumeName),
+		})
+}
diff --git a/e2e/snapshot_test.go b/e2e/snapshot_test.go
index 0f9a749b..3cd55fbe 100644
--- a/e2e/snapshot_test.go
+++ b/e2e/snapshot_test.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"time"
 
+	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	k8sv1 "k8s.io/api/core/v1"
@@ -13,9 +14,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/utils/ptr"
-	snapcli "kubevirt.io/csi-driver/pkg/generated/external-snapshotter/client-go/clientset/versioned"
 
-	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
+	snapcli "kubevirt.io/csi-driver/pkg/generated/external-snapshotter/client-go/clientset/versioned"
 )
 
 var _ = Describe("Snapshot", func() {
@@ -59,7 +59,7 @@ var _ = Describe("Snapshot", func() {
 		_ = os.RemoveAll(tmpDir)
 	})
 
-	DescribeTable("creates a pvc and attaches to pod, then create snapshot", Label("pvcCreation"), func(volumeMode k8sv1.PersistentVolumeMode, podCreationFunc, podReaderFunc func(string) *k8sv1.Pod) {
+	DescribeTable("creates a pvc and attaches to pod, then create snapshot", Label("pvcCreation", "snapshot"), func(volumeMode k8sv1.PersistentVolumeMode, storageOpt storageOption, writeCmd, readCmd string) {
 		pvcName := "test-pvc"
 		storageClassName := "kubevirt"
 		pvc := pvcSpec(pvcName, storageClassName, "10Mi")
@@ -70,10 +70,15 @@ var _ = Describe("Snapshot", func() {
 		Expect(err).ToNot(HaveOccurred())
 
 		By("creating a pod that attaches pvc")
+		wPod := createPod("writer-pod",
+			withCommand(writeCmd),
+			storageOpt(pvc.Name),
+		)
 		runPod(
 			tenantClient.CoreV1(),
 			namespace,
-			podCreationFunc(pvc.Name))
+			wPod,
+			true)
 
 		By("creating a snapshot")
 		snapshotName := "test-snapshot"
@@ -125,13 +130,17 @@ var _ = Describe("Snapshot", func() {
 		Expect(err).ToNot(HaveOccurred())
 
 		By("creating a pod that attaches the restored pvc, and checks the changes are there")
+		rPod := createPod("reader-pod",
+			withCommand(readCmd),
+			storageOpt(pvc.Name))
 		runPod(
 			tenantClient.CoreV1(),
 			namespace,
-			podReaderFunc(pvc.Name))
+			rPod,
+			true)
 	},
-		Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, writerPodFs, readerPodFs),
-		Entry("Block volume mode", k8sv1.PersistentVolumeBlock, writerPodBlock, readerPodBlock),
+		Entry("Filesystem volume mode", Label("FS"), k8sv1.PersistentVolumeFilesystem, withFileSystem, fsWriteCommand, fsReadCommand),
+		Entry("Block volume mode", Label("Block"), k8sv1.PersistentVolumeBlock, withBlock, blockWriteCommand, blockReadCommand),
 	)
 })
diff --git a/hack/cluster-sync-split.sh b/hack/cluster-sync-split.sh
index 944617b8..2bc9e683 100755
--- a/hack/cluster-sync-split.sh
+++ b/hack/cluster-sync-split.sh
@@ -101,5 +101,8 @@ _kubectl apply --kustomize ./deploy/controller-infra/dev-overlay
 # ******************************************************
 # Wait for driver to rollout
 # ******************************************************
+_kubectl_tenant rollout restart ds/kubevirt-csi-node -n $CSI_DRIVER_NAMESPACE
+_kubectl rollout restart deployment/kubevirt-csi-controller -n $TENANT_CLUSTER_NAMESPACE
+
 _kubectl_tenant rollout status ds/kubevirt-csi-node -n $CSI_DRIVER_NAMESPACE --timeout=10m
 _kubectl rollout status deployment/kubevirt-csi-controller -n $TENANT_CLUSTER_NAMESPACE --timeout=10m
diff --git a/hack/cluster-up.sh b/hack/cluster-up.sh
index cf7dcae5..8723fc37 100755
--- a/hack/cluster-up.sh
+++ b/hack/cluster-up.sh
@@ -16,7 +16,7 @@ echo "Creating $TENANT_CLUSTER_NAMESPACE"
 ./kubevirtci create-cluster
 
 echo "Waiting for $TENANT_CLUSTER_NAMESPACE vmis to be ready"
-./kubevirtci kubectl wait --for=condition=Ready vmi -l capk.cluster.x-k8s.io/kubevirt-machine-namespace=$TENANT_CLUSTER_NAMESPACE -n $TENANT_CLUSTER_NAMESPACE
+./kubevirtci kubectl wait --for=condition=Ready vmi -l capk.cluster.x-k8s.io/kubevirt-machine-namespace=$TENANT_CLUSTER_NAMESPACE -n $TENANT_CLUSTER_NAMESPACE --timeout=300s
 
 echo "Installing networking (calico)"
 ./kubevirtci install-calico
diff --git a/hack/generate_clients.sh b/hack/generate_clients.sh
index 80421fc7..26313ae3 100755
--- a/hack/generate_clients.sh
+++ b/hack/generate_clients.sh
@@ -4,7 +4,6 @@ set -o nounset
 set -o pipefail
 
 go install k8s.io/code-generator/cmd/client-gen@latest
-go get kubevirt.io/api
 client-gen --input-base="kubevirt.io/api/" --input="core/v1" --output-package="kubevirt.io/csi-driver/pkg/generated/kubevirt/client-go/clientset" --output-base="../../" --clientset-name="versioned" --go-header-file hack/boilerplate.go.txt
 
 go get kubevirt.io/containerized-data-importer-api
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index 055aca66..8535bac6 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -30,8 +30,24 @@ if [ ! -f "${KUBECTL_PATH}" ]; then
     curl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -o ${KUBECTL_PATH}
     chmod u+x ${KUBECTL_PATH}
 fi
-
+
+GINKGO_LABELS=""
+if [[ -n ${LABELS} ]]; then
+    GINKGO_LABELS="--ginkgo.label-filter=${LABELS}"
+fi
+
 rm -rf $TEST_WORKING_DIR
 mkdir -p $TEST_WORKING_DIR
 
-$BIN_DIR/e2e.test -ginkgo.v -test.v -ginkgo.no-color --kubectl-path $KUBECTL_PATH --clusterctl-path $CLUSTERCTL_PATH --virtctl-path $VIRTCTL_PATH --working-dir $TEST_WORKING_DIR --dump-path $DUMP_PATH --infra-kubeconfig=$KUBECONFIG --infra-cluster-namespace=${INFRA_CLUSTER_NAMESPACE}
+$BIN_DIR/e2e.test \
+    -ginkgo.v \
+    -test.v \
+    -ginkgo.no-color \
+    --kubectl-path "${KUBECTL_PATH}" \
+    --clusterctl-path "${CLUSTERCTL_PATH}" \
+    --virtctl-path "${VIRTCTL_PATH}" \
+    --working-dir "${TEST_WORKING_DIR}" \
+    --dump-path "${DUMP_PATH}" \
+    --infra-kubeconfig="${KUBECONFIG}" \
+    --infra-cluster-namespace="${INFRA_CLUSTER_NAMESPACE}" \
+    ${GINKGO_LABELS}
diff --git a/hack/run-k8s-e2e.sh b/hack/run-k8s-e2e.sh
index dc324daf..3ab2e779 100755
--- a/hack/run-k8s-e2e.sh
+++ b/hack/run-k8s-e2e.sh
@@ -48,7 +48,7 @@ function ensure_synced {
 
 function create_test_driver_cm {
     echo "Creating test-driver CM"
-    ./kubevirtci kubectl create configmap -n $TENANT_CLUSTER_NAMESPACE $test_driver_cm --from-file=./hack/test-driver.yaml
+    ./kubevirtci kubectl create configmap -n $TENANT_CLUSTER_NAMESPACE $test_driver_cm --from-file=./hack/test-driver.yaml --from-file=./hack/test-driver-rwx.yaml
 }
 
 function create_capk_secret {
@@ -95,17 +95,46 @@ spec:
       - -c
       - |
        cd /tmp
-        curl --location https://dl.k8s.io/v1.26.0/kubernetes-test-linux-amd64.tar.gz | tar --strip-components=3 -zxf - kubernetes/test/bin/e2e.test kubernetes/test/bin/ginkgo
+        curl --location https://dl.k8s.io/v1.26.0/kubernetes-test-linux-amd64.tar.gz | tar --strip-components=3 -zxf - kubernetes/test/bin/e2e.test kubernetes/test/bin/ginkgo
        chmod +x e2e.test
        curl -LO "https://dl.k8s.io/release/v1.26.0/bin/linux/amd64/kubectl"
        chmod +x kubectl
-        echo \$TEST_DRIVER_PATH
-        ./e2e.test -kubeconfig \${KUBECONFIG} -kubectl-path ./kubectl -ginkgo.v -ginkgo.timeout=2h -ginkgo.focus='External.Storage.*csi.kubevirt.io.*' -ginkgo.skip='CSI Ephemeral-volume*' -ginkgo.skip='SELinuxMountReadWriteOncePod.*' -storage.testdriver=\${TEST_DRIVER_PATH}/test-driver.yaml -provider=local -report-dir=/tmp
-        ret=\$?
+        echo \${TEST_DRIVER_PATH}
+        ./e2e.test -kubeconfig \${KUBECONFIG} \
+          -kubectl-path ./kubectl \
+          -ginkgo.v \
+          -ginkgo.timeout=2h \
+          -ginkgo.focus='External.Storage.*csi.kubevirt.io.*' \
+          -ginkgo.skip='CSI Ephemeral-volume*' \
+          -ginkgo.skip='SELinuxMountReadWriteOncePod.*' \
+          -storage.testdriver=\${TEST_DRIVER_PATH}/test-driver.yaml \
+          -provider=local -report-dir=/tmp
+        ret1=\$?
+        if [[ \${ret1} -ne 0 ]]; then
+          echo "kubernetes e2e test failed"
+        fi
+        ./e2e.test -kubeconfig \${KUBECONFIG} \
+          -kubectl-path ./kubectl \
+          -ginkgo.v \
+          -ginkgo.timeout=2h \
+          -ginkgo.focus='External.Storage.*csi.kubevirt.io.*should concurrently access the single volume from pods on different node.*' \
+          -ginkgo.skip='CSI Ephemeral-volume*' \
+          -ginkgo.skip='SELinuxMountReadWriteOncePod' \
+          -ginkgo.skip='\((?:xfs|filesystem volmode|ntfs|ext4)\).* multiVolume \[Slow]' \
+          -storage.testdriver=\${TEST_DRIVER_PATH}/test-driver-rwx.yaml \
+          -report-prefix="rwx_" \
+          -provider=local -report-dir=/tmp
+        ret2=\$?
+        if [[ \${ret2} -ne 0 ]]; then
+          echo "kubernetes e2e RWX test failed"
+        fi
        while [ ! -f /tmp/exit.txt ]; do
          sleep 2
        done
-        exit \$ret
+        if [[ \${ret1} -ne 0 ]]; then
+          exit \$ret1
+        fi
+        exit \$ret2
    volumeMounts:
    - name: kubeconfig
      mountPath: "/etc/kubernetes/kubeconfig"
@@ -135,9 +164,16 @@ if ./kubevirtci kubectl get storageprofile local; then
 fi
 }
 
+function make_control_plane_schedulable() {
+    for node in $(./kubevirtci kubectl-tenant get nodes -l node-role.kubernetes.io/control-plane -o custom-columns=:.metadata.name --no-headers 2>/dev/null | tail -n +2); do
+        ./kubevirtci kubectl-tenant patch node --type=json -p '[{"op": "remove", "path": "/spec/taints"}]' "${node}" | tail -n +2 || true
+    done
+}
+
 trap cleanup EXIT SIGSTOP SIGKILL SIGTERM
 ensure_cluster_up
 ensure_synced
+make_control_plane_schedulable
 create_test_driver_cm
 create_capk_secret
 patch_local_storage_profile
@@ -150,9 +186,14 @@ while [[ ! $(./kubevirtci kubectl exec -n $TENANT_CLUSTER_NAMESPACE ${test_pod}
     sleep 30
 done
 
+while [[ ! $(./kubevirtci kubectl exec -n $TENANT_CLUSTER_NAMESPACE ${test_pod} -- ls /tmp/junit_rwx_01.xml 2>/dev/null) ]]; do
+    sleep 30
+done
+
 if [[ -n "$ARTIFACTS" ]]; then
     echo "Copying results"
-    ./kubevirtci kubectl cp ${TENANT_CLUSTER_NAMESPACE}/${test_pod}:/tmp/junit_01.xml $ARTIFACTS/junit.functest.xml
+    ./kubevirtci kubectl cp "${TENANT_CLUSTER_NAMESPACE}/${test_pod}:/tmp/junit_01.xml" "${ARTIFACTS}/junit.functest.xml"
+    ./kubevirtci kubectl cp "${TENANT_CLUSTER_NAMESPACE}/${test_pod}:/tmp/junit_rwx_01.xml" "${ARTIFACTS}/junit.functest-rwx.xml"
 fi
 
 ./kubevirtci kubectl exec -n $TENANT_CLUSTER_NAMESPACE ${test_pod} -- touch /tmp/exit.txt
diff --git a/hack/test-driver-rwx.yaml b/hack/test-driver-rwx.yaml
new file mode 100644
index 00000000..5b1f61af
--- /dev/null
+++ b/hack/test-driver-rwx.yaml
@@ -0,0 +1,25 @@
+StorageClass:
+  FromName: false
+  FromExistingClassName: kubevirt
+SnapshotClass:
+  FromName: true
+DriverInfo:
+  Name: csi.kubevirt.io
+  Capabilities:
+    block: true
+    controllerExpansion: false
+    exec: true
+    fsGroup: true
+    multipods: true
+    nodeExpansion: false
+    persistence: true
+    singleNodeVolume: false
+    snapshotDataSource: true
+    topology: false
+    capacity: false
+    RWX: true
+  RequiredAccessModes:
+  - ReadWriteMany
+InlineVolumes:
+- shared: false
diff --git a/hack/test-driver.yaml b/hack/test-driver.yaml
index 56ad2e00..087c411e 100644
--- a/hack/test-driver.yaml
+++ b/hack/test-driver.yaml
@@ -17,6 +17,7 @@ DriverInfo:
     snapshotDataSource: true
     topology: false
     capacity: false
+    RWX: false
   SupportedFsType:
     ext4: {}
     xfs: {}
diff --git a/pkg/service/controller.go b/pkg/service/controller.go
index 372c0c8d..ede46c4e 100644
--- a/pkg/service/controller.go
+++ b/pkg/service/controller.go
@@ -15,8 +15,10 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
 	kubevirtv1 "kubevirt.io/api/core/v1"
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 
@@ -51,36 +53,64 @@ var controllerCaps = []csi.ControllerServiceCapability_RPC_Type{
 	csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
 }
 
-func (c *ControllerService) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {
+func (c *ControllerService) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) (bool, error) {
 	if req == nil {
-		return status.Error(codes.InvalidArgument, "missing request")
+		return false, status.Error(codes.InvalidArgument, "missing request")
 	}
 
 	// Check arguments
 	if len(req.GetName()) == 0 {
-		return status.Error(codes.InvalidArgument, "name missing in request")
+		return false, status.Error(codes.InvalidArgument, "name missing in request")
 	}
 
 	caps := req.GetVolumeCapabilities()
+
 	if caps == nil {
-		return status.Error(codes.InvalidArgument, "volume capabilities missing in request")
+		return false, status.Error(codes.InvalidArgument, "volume capabilities missing in request")
+	}
+
+	isBlock, isRWX := getAccessMode(caps)
+
+	if isRWX && !isBlock {
+		return false, status.Error(codes.InvalidArgument, "non-block volume with RWX access mode is not supported")
 	}
 
 	if c.storageClassEnforcement.AllowAll {
-		return nil
+		return isRWX, nil
 	}
 
 	storageClassName := req.Parameters[infraStorageClassNameParameter]
 	if storageClassName == "" {
 		if c.storageClassEnforcement.AllowDefault {
-			return nil
+			return isRWX, nil
 		} else {
-			return unallowedStorageClass
+			return false, unallowedStorageClass
 		}
 	}
 
 	if !util.Contains(c.storageClassEnforcement.AllowList, storageClassName) {
-		return unallowedStorageClass
+		return false, unallowedStorageClass
 	}
 
-	return nil
+	return isRWX, nil
+}
+
+// getAccessMode reports whether the requested capabilities include block
+// access and whether they request RWX (multi-node multi-writer) access.
+func getAccessMode(caps []*csi.VolumeCapability) (bool, bool) {
+	isBlock := false
+	isRWX := false
+
+	for _, capability := range caps {
+		if capability != nil {
+			if capability.GetBlock() != nil {
+				isBlock = true
+			}
+
+			if am := capability.GetAccessMode(); am != nil {
+				if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
+					isRWX = true
+				}
+			}
+		}
+	}
+
+	return isBlock, isRWX
 }
 
 // CreateVolume Create a new DataVolume.
@@ -90,7 +120,8 @@ func (c *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
 	if req != nil {
 		klog.V(3).Infof("Create Volume Request: %+v", *req)
 	}
-	if err := c.validateCreateVolumeRequest(req); err != nil {
+	isRWX, err := c.validateCreateVolumeRequest(req)
+	if err != nil {
 		return nil, err
 	}
 
@@ -107,22 +138,40 @@ func (c *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
 	}
 
 	// Create DataVolume object
-	dv := &cdiv1.DataVolume{}
-	dv.Name = dvName
-	dv.Namespace = c.infraClusterNamespace
-	dv.Kind = "DataVolume"
-	dv.APIVersion = cdiv1.SchemeGroupVersion.String()
-	dv.ObjectMeta.Labels = c.infraClusterLabels
-	dv.ObjectMeta.Annotations = map[string]string{
-		"cdi.kubevirt.io/storage.deleteAfterCompletion": "false",
-	}
-	dv.Spec.Storage = &cdiv1.StorageSpec{
-		Resources: corev1.ResourceRequirements{
-			Requests: corev1.ResourceList{
-				corev1.ResourceStorage: *resource.NewScaledQuantity(storageSize, 0)},
+	source, err := c.determineDvSource(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	dv := &cdiv1.DataVolume{
+		TypeMeta: v1.TypeMeta{
+			Kind:       "DataVolume",
+			APIVersion: cdiv1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Name:      dvName,
+			Namespace: c.infraClusterNamespace,
+			Labels:    c.infraClusterLabels,
+			Annotations: map[string]string{
+				"cdi.kubevirt.io/storage.deleteAfterCompletion": "false",
+			},
+		},
+		Spec: cdiv1.DataVolumeSpec{
+			Storage: &cdiv1.StorageSpec{
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceStorage: *resource.NewScaledQuantity(storageSize, 0)},
+				},
+			},
+			Source: source,
 		},
 	}
 
+	if isRWX {
+		dv.Spec.Storage.VolumeMode = ptr.To(corev1.PersistentVolumeBlock)
+		dv.Spec.Storage.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}
+	}
+
 	// Only set the storageclass if it is defined. Otherwise we use the
 	// default storage class which means leaving the storageclass empty
 	// (nil) on the PVC
@@ -130,14 +179,9 @@ func (c *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
 		dv.Spec.Storage.StorageClassName = &storageClassName
 	}
 
-	var err error
-	dv.Spec.Source, err = c.determineDvSource(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
 	if existingDv, err := c.virtClient.GetDataVolume(ctx, c.infraClusterNamespace, dvName); errors.IsNotFound(err) {
 		// Create DataVolume
+		klog.Infof("creating new DataVolume %s/%s", c.infraClusterNamespace, dvName)
 		dv, err = c.virtClient.CreateDataVolume(ctx, c.infraClusterNamespace, dv)
 		if err != nil {
 			klog.Error("failed creating DataVolume " + dvName)
diff --git a/pkg/service/controller_test.go b/pkg/service/controller_test.go
index 1033f7ef..fb98364e 100644
--- a/pkg/service/controller_test.go
+++ b/pkg/service/controller_test.go
@@ -19,6 +19,7 @@ import (
 	"k8s.io/utils/ptr"
 	kubevirtv1 "kubevirt.io/api/core/v1"
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+
 	"kubevirt.io/csi-driver/pkg/util"
 
@@ -38,7 +39,7 @@ var _ = Describe("StorageClass", func() {
 		client := &ControllerClientMock{}
 		controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement}
 
-		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest())
+		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER)))
 		Expect(err).ToNot(HaveOccurred())
 		Expect(testVolumeName).To(Equal(response.GetVolume().GetVolumeId()))
 		Expect(testDataVolumeUID).To(Equal(response.GetVolume().VolumeContext[serialParameter]))
@@ -48,24 +49,57 @@ var _ = Describe("StorageClass", func() {
 })
 
 var _ = Describe("CreateVolume", func() {
-	It("should successfully create a volume", func() {
+	DescribeTable("should successfully create a volume", func(cap *csi.VolumeCapability, expectedAC *corev1.PersistentVolumeAccessMode, expectedVolumeMode *corev1.PersistentVolumeMode) {
 		client := &ControllerClientMock{}
 		controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement}
 
-		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest())
+		request := getCreateVolumeRequest(cap)
+		response, err := controller.CreateVolume(context.TODO(), request)
 		Expect(err).ToNot(HaveOccurred())
-		Expect(testVolumeName).To(Equal(response.GetVolume().GetVolumeId()))
-		Expect(testDataVolumeUID).To(Equal(response.GetVolume().VolumeContext[serialParameter]))
-		Expect(string(getBusType())).To(Equal(response.GetVolume().VolumeContext[busParameter]))
-		Expect(testVolumeStorageSize).To(Equal(response.GetVolume().GetCapacityBytes()))
+		Expect(response.GetVolume().GetVolumeId()).To(Equal(testVolumeName))
+		Expect(response.GetVolume().VolumeContext[serialParameter]).To(Equal(testDataVolumeUID))
+		Expect(response.GetVolume().VolumeContext[busParameter]).To(Equal(string(getBusType())))
+		Expect(response.GetVolume().GetCapacityBytes()).To(Equal(testVolumeStorageSize))
+
+		dv, err := client.GetDataVolume(context.TODO(), testInfraNamespace, request.Name)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(dv).ToNot(BeNil())
+
+		if expectedAC != nil {
+			Expect(dv.Spec.Storage).ToNot(BeNil())
+			Expect(dv.Spec.Storage.AccessModes).ToNot(BeEmpty())
+			Expect(dv.Spec.Storage.AccessModes[0]).To(Equal(*expectedAC))
+		} else if dv.Spec.Storage != nil {
+			Expect(dv.Spec.Storage.AccessModes).To(BeEmpty())
+		}
+
+		if expectedVolumeMode != nil {
+			Expect(dv.Spec.Storage).ToNot(BeNil())
+			Expect(dv.Spec.Storage.VolumeMode).To(HaveValue(Equal(*expectedVolumeMode)))
+		} else if dv.Spec.Storage != nil {
+			Expect(dv.Spec.Storage.VolumeMode).To(BeNil())
+		}
+	},
+		Entry("volume mode = block; [RWX]", getVolumeCapability(corev1.PersistentVolumeBlock, csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER), ptr.To(corev1.ReadWriteMany), ptr.To(corev1.PersistentVolumeBlock)),
+		Entry("volume mode = block; [RWO]", getVolumeCapability(corev1.PersistentVolumeBlock, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER), nil, nil),
+		Entry("volume mode = filesystem; [RWO]", getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER), nil, nil),
+	)
+
+	It("should reject create volume request for FS & RWX", func() {
+		client := &ControllerClientMock{}
+		controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement}
+
+		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER)))
+		Expect(err).To(MatchError(ContainSubstring("non-block volume with RWX access mode is not supported")))
+		Expect(response).To(BeNil())
 	})
 
 	It("should propagate error from CreateVolume", func() {
 		client := &ControllerClientMock{FailCreateDataVolume: true}
 		controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement}
 
-		_, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest())
+		_, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER)))
 		Expect(err).To(HaveOccurred())
 	})
 
@@ -76,15 +110,13 @@ var _ = Describe("CreateVolume", func() {
 		busTypeLocal := kubevirtv1.DiskBusVirtio
 		testBusType = &busTypeLocal
 
-		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest())
+		response, err := controller.CreateVolume(context.TODO(), getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER)))
 		Expect(err).ToNot(HaveOccurred())
 		Expect(response.GetVolume().GetVolumeContext()[busParameter]).To(Equal(string(busTypeLocal)))
 	})
 
 	It("should not allow storage class not in the allow list", func() {
 		client := &ControllerClientMock{}
-		allowedStorageClasses = []string{"allowedClass"}
-		allowAllStorageClasses = false
 		storageClassEnforcement = util.StorageClassEnforcement{
 			AllowList:    []string{"allowedClass"},
 			AllowAll:     false,
@@ -92,7 +124,7 @@ var _ = Describe("CreateVolume", func() {
 		}
 		controller := ControllerService{client, testInfraNamespace, testInfraLabels, storageClassEnforcement}
 
-		request := getCreateVolumeRequest()
+		request := getCreateVolumeRequest(getVolumeCapability(corev1.PersistentVolumeFilesystem, csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER))
 		request.Parameters[infraStorageClassNameParameter] = "notAllowedClass"
 
 		_, err := controller.CreateVolume(context.TODO(), request)
@@ -474,11 +506,8 @@ var (
 	testVMName        = "test-vm"
 	testVMUID         = "6fc9c805-b3a0-570b-9d1b-3b8b9cfc9fb7"
 	testDataVolumeUID = "2d0111d5-494f-4731-8f67-122b27d3c366"
-	testVolumeMode    = corev1.PersistentVolumeFilesystem
 	testBusType       *kubevirtv1.DiskBus = nil // nil==do not pass bus type
 	testInfraLabels   = map[string]string{"infra-label-name": "infra-label-value"}
-	allowedStorageClasses   = []string{}
-	allowAllStorageClasses  = true
 	storageClassEnforcement = util.StorageClassEnforcement{
 		AllowAll:     true,
 		AllowDefault: true,
@@ -493,20 +522,29 @@ func getBusType() kubevirtv1.DiskBus {
 	}
 }
 
-func getCreateVolumeRequest() *csi.CreateVolumeRequest {
-
+func getVolumeCapability(volumeMode corev1.PersistentVolumeMode, accessModes csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability {
 	var volumeCapability *csi.VolumeCapability
-	if testVolumeMode == corev1.PersistentVolumeFilesystem {
+	if volumeMode == corev1.PersistentVolumeFilesystem {
 		volumeCapability = &csi.VolumeCapability{
 			AccessType: &csi.VolumeCapability_Mount{},
 		}
 	} else {
 		volumeCapability = &csi.VolumeCapability{
-			AccessType: &csi.VolumeCapability_Block{},
+			AccessType: &csi.VolumeCapability_Block{
+				Block: &csi.VolumeCapability_BlockVolume{},
+			},
 		}
 	}
 
+	volumeCapability.AccessMode = &csi.VolumeCapability_AccessMode{
+		Mode: accessModes,
+	}
+
+	return volumeCapability
+}
+
+func getCreateVolumeRequest(volumeCapability *csi.VolumeCapability) *csi.CreateVolumeRequest {
 	parameters := map[string]string{}
 	if testInfraStorageClassName != "" {
 		parameters[infraStorageClassNameParameter] = testInfraStorageClassName
@@ -652,6 +690,11 @@ func (c *ControllerClientMock) CreateDataVolume(_ context.Context, namespace str
 
 	result.SetUID(types.UID(testDataVolumeUID))
 
+	if c.datavolumes == nil {
+		c.datavolumes = make(map[string]*cdiv1.DataVolume)
+	}
+	c.datavolumes[getKey(namespace, dataVolume.Name)] = dataVolume
+
 	return result, nil
 }
 
 func (c *ControllerClientMock) GetDataVolume(_ context.Context, namespace string, name string) (*cdiv1.DataVolume, error) {