diff --git a/pkg/controller/mpi_job_controller.go b/pkg/controller/mpi_job_controller.go
index 8f19a89ae..0958668ae 100644
--- a/pkg/controller/mpi_job_controller.go
+++ b/pkg/controller/mpi_job_controller.go
@@ -53,7 +53,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog"
 	"k8s.io/utils/clock"
-	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
 	volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned"
@@ -673,7 +672,7 @@ func (c *MPIJobController) syncHandler(key string) error {
 	if launcher != nil {
 		if isMPIJobSuspended(mpiJob) != isJobSuspended(launcher) {
 			// align the suspension state of launcher with the MPIJob
-			launcher.Spec.Suspend = pointer.Bool(isMPIJobSuspended(mpiJob))
+			launcher.Spec.Suspend = ptr.To(isMPIJobSuspended(mpiJob))
 			if _, err := c.kubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), launcher, metav1.UpdateOptions{}); err != nil {
 				return err
 			}
@@ -998,11 +997,11 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
 }
 
 func isMPIJobSuspended(mpiJob *kubeflow.MPIJob) bool {
-	return pointer.BoolDeref(mpiJob.Spec.RunPolicy.Suspend, false)
+	return ptr.Deref(mpiJob.Spec.RunPolicy.Suspend, false)
 }
 
 func isJobSuspended(job *batchv1.Job) bool {
-	return pointer.BoolDeref(job.Spec.Suspend, false)
+	return ptr.Deref(job.Spec.Suspend, false)
 }
 
 func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
@@ -1486,7 +1485,7 @@ func (c *MPIJobController) newLauncherJob(mpiJob *kubeflow.MPIJob) *batchv1.Job
 		},
 	}
 	if isMPIJobSuspended(mpiJob) {
-		job.Spec.Suspend = pointer.Bool(true)
+		job.Spec.Suspend = ptr.To(true)
 	}
 	return job
 }
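
Note for reviewers (illustrative only, not part of the patch): k8s.io/utils/ptr replaces the per-type helpers of the deprecated k8s.io/utils/pointer package with two generic functions, ptr.To and ptr.Deref. A minimal, self-contained sketch of the equivalences relied on in this file, using only the real ptr API:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To returns a pointer to any value; the type parameter is inferred
	// from the argument, so ptr.To(true) stands in for pointer.Bool(true).
	suspend := ptr.To(true) // *bool

	// ptr.Deref dereferences a possibly nil pointer and falls back to the
	// given default, replacing pointer.BoolDeref, pointer.Int32Deref, etc.
	fmt.Println(ptr.Deref(suspend, false)) // true
}
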
diff --git a/pkg/controller/mpi_job_controller_test.go b/pkg/controller/mpi_job_controller_test.go
index dbc164d3c..e4d9e1a36 100644
--- a/pkg/controller/mpi_job_controller_test.go
+++ b/pkg/controller/mpi_job_controller_test.go
@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/clock"
 	clocktesting "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
@@ -804,7 +803,7 @@ func TestCreateSuspendedMPIJob(t *testing.T) {
 	// create a suspended job
 	var replicas int32 = 8
 	mpiJob := newMPIJob("test", &replicas, nil, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	mpiJob.Spec.MPIImplementation = implementation
 	f.setUpMPIJob(mpiJob)
 
@@ -823,7 +822,7 @@ func TestCreateSuspendedMPIJob(t *testing.T) {
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.expectCreateJobAction(launcher)
 
 	// expect an update to add the conditions
@@ -851,7 +850,7 @@ func TestSuspendedRunningMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	msg = fmt.Sprintf("MPIJob %s/%s is running.", mpiJob.Namespace, mpiJob.Name)
@@ -893,18 +892,18 @@ func TestSuspendedRunningMPIJob(t *testing.T) {
 	// setup launcher and its pod
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(false)
+	launcher.Spec.Suspend = ptr.To(false)
 	launcherPod := mockJobPod(launcher)
 	launcherPod.Status.Phase = corev1.PodRunning
 	f.setUpLauncher(launcher)
 	f.setUpPod(launcherPod)
 
 	// transition the MPIJob into suspended state
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 
 	// expect moving the launcher pod into suspended state
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(true)
+	launcherCopy.Spec.Suspend = ptr.To(true)
 	f.expectUpdateJobAction(launcherCopy)
 
 	// expect removal of the pods
@@ -939,7 +938,7 @@ func TestResumeMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	updateMPIJobConditions(mpiJob, kubeflow.JobSuspended, corev1.ConditionTrue, mpiJobSuspendedReason, "MPIJob suspended")
@@ -966,14 +965,14 @@ func TestResumeMPIJob(t *testing.T) {
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.setUpLauncher(launcher)
 
 	// move the timer by a second so that the StartTime is updated after resume
 	fakeClock.Sleep(time.Second)
 
 	// resume the MPIJob
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 
 	// expect creation of the pods
 	for i := 0; i < int(replicas); i++ {
@@ -983,7 +982,7 @@ func TestResumeMPIJob(t *testing.T) {
 
 	// expect the launcher update to resume it
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(false)
+	launcherCopy.Spec.Suspend = ptr.To(false)
 	f.expectUpdateJobAction(launcherCopy)
 
 	// expect an update to add the conditions
@@ -1545,7 +1544,7 @@ func TestNewConfigMap(t *testing.T) {
 				},
 				Spec: kubeflow.MPIJobSpec{
 					MPIImplementation: kubeflow.MPIImplementationOpenMPI,
-					RunLauncherAsWorker: pointer.Bool(true),
+					RunLauncherAsWorker: ptr.To(true),
 				},
 			},
 			workerReplicas: 2,
@@ -1570,7 +1569,7 @@ func TestNewConfigMap(t *testing.T) {
 				},
 				Spec: kubeflow.MPIJobSpec{
 					MPIImplementation: kubeflow.MPIImplementationOpenMPI,
-					RunLauncherAsWorker: pointer.Bool(true),
+					RunLauncherAsWorker: ptr.To(true),
 				},
 			},
 			workerReplicas: 0,
@@ -1618,7 +1617,7 @@ func TestNewConfigMap(t *testing.T) {
 					Namespace: "project-x",
 				},
 				Spec: kubeflow.MPIJobSpec{
-					SlotsPerWorker: pointer.Int32(10),
+					SlotsPerWorker: ptr.To[int32](10),
 					MPIImplementation: kubeflow.MPIImplementationIntel,
 				},
 			},
@@ -1643,7 +1642,7 @@ func TestNewConfigMap(t *testing.T) {
 					Namespace: "project-x",
 				},
 				Spec: kubeflow.MPIJobSpec{
-					SlotsPerWorker: pointer.Int32(10),
+					SlotsPerWorker: ptr.To[int32](10),
 					MPIImplementation: kubeflow.MPIImplementationMPICH,
 				},
 			},
diff --git a/pkg/controller/podgroup.go b/pkg/controller/podgroup.go
index c01d691b8..7bf105b2d 100644
--- a/pkg/controller/podgroup.go
+++ b/pkg/controller/podgroup.go
@@ -25,7 +25,7 @@ import (
 	schedulinglisters "k8s.io/client-go/listers/scheduling/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
 	schedinformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions"
@@ -242,7 +242,7 @@ func (s *SchedulerPluginsCtrl) newPodGroup(mpiJob *kubeflow.MPIJob) metav1.Objec
 	if mpiJob == nil {
 		return nil
 	}
-	scheduleTimeoutSec := pointer.Int32(0)
+	scheduleTimeoutSec := ptr.To[int32](0)
 	if schedPolicy := mpiJob.Spec.RunPolicy.SchedulingPolicy; schedPolicy != nil && schedPolicy.ScheduleTimeoutSeconds != nil {
 		scheduleTimeoutSec = schedPolicy.ScheduleTimeoutSeconds
 	}
@@ -364,9 +364,9 @@ func calPGMinResource(minMember *int32, mpiJob *kubeflow.MPIJob, pcLister schedu
 			klog.Warningf("Couldn't find the worker replicas")
 			return nil
 		}
-		order[wIndex].Replicas = pointer.Int32(*minMember - 1)
+		order[wIndex].Replicas = ptr.To(*minMember - 1)
 	} else {
-		order[1].Replicas = pointer.Int32(*minMember - 1)
+		order[1].Replicas = ptr.To(*minMember - 1)
 	}
 }
@@ -390,7 +390,7 @@ func calculateMinAvailable(mpiJob *kubeflow.MPIJob) *int32 {
 	if schedulingPolicy := mpiJob.Spec.RunPolicy.SchedulingPolicy; schedulingPolicy != nil && schedulingPolicy.MinAvailable != nil {
 		return schedulingPolicy.MinAvailable
 	}
-	return pointer.Int32(workerReplicas(mpiJob) + 1)
+	return ptr.To(workerReplicas(mpiJob) + 1)
 }
 
 // calculatePriorityClassName calculates the priorityClass name needed for podGroup according to the following priorities:
diff --git a/pkg/controller/podgroup_test.go b/pkg/controller/podgroup_test.go
index 2301b8aaa..9ce3d96df 100644
--- a/pkg/controller/podgroup_test.go
+++ b/pkg/controller/podgroup_test.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/clock"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	volcanov1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
@@ -57,16 +57,16 @@ func TestNewPodGroup(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					RunPolicy: kubeflow.RunPolicy{
 						SchedulingPolicy: &kubeflow.SchedulingPolicy{
-							MinAvailable: pointer.Int32(2),
+							MinAvailable: ptr.To[int32](2),
 							Queue: "project-y",
 							PriorityClass: "high",
 							MinResources: minResources,
-							ScheduleTimeoutSeconds: pointer.Int32(100),
+							ScheduleTimeoutSeconds: ptr.To[int32](100),
 						},
 					},
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{{
@@ -81,7 +81,7 @@ func TestNewPodGroup(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(1000),
+							Replicas: ptr.To[int32](1000),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{{
@@ -124,7 +124,7 @@ func TestNewPodGroup(t *testing.T) {
 				Spec: schedv1alpha1.PodGroupSpec{
 					MinMember: 2,
 					MinResources: *minResources,
-					ScheduleTimeoutSeconds: pointer.Int32(100),
+					ScheduleTimeoutSeconds: ptr.To[int32](100),
 				},
 			},
 		},
@@ -139,7 +139,7 @@ func TestNewPodGroup(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									PriorityClassName: "high",
@@ -155,7 +155,7 @@ func TestNewPodGroup(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(2),
+							Replicas: ptr.To[int32](2),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{{
@@ -200,7 +200,7 @@ func TestNewPodGroup(t *testing.T) {
 				},
 				Spec: schedv1alpha1.PodGroupSpec{
 					MinMember: 3,
-					ScheduleTimeoutSeconds: pointer.Int32(0),
+					ScheduleTimeoutSeconds: ptr.To[int32](0),
 					MinResources: corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("21"),
 						corev1.ResourceMemory: resource.MustParse("42Gi"),
@@ -406,7 +406,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{
@@ -423,7 +423,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(2),
+							Replicas: ptr.To[int32](2),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{
@@ -492,7 +492,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 					Name: "test",
 				},
 			},
-			minMember: pointer.Int32(0),
+			minMember: ptr.To[int32](0),
 			want: nil,
 		},
 		"without priorityClass": {
@@ -503,7 +503,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{
@@ -520,7 +520,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(2),
+							Replicas: ptr.To[int32](2),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									Containers: []corev1.Container{
@@ -553,7 +553,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 			},
 		},
 		"with non-existence priorityClass": {
-			minMember: pointer.Int32(2),
+			minMember: ptr.To[int32](2),
 			job: &kubeflow.MPIJob{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "test",
@@ -561,7 +561,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									PriorityClassName: "non-existence",
@@ -579,7 +579,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(2),
+							Replicas: ptr.To[int32](2),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									PriorityClassName: "non-existence",
@@ -619,7 +619,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 					Value: 10_010,
 				},
 			},
-			minMember: pointer.Int32(2),
+			minMember: ptr.To[int32](2),
 			job: &kubeflow.MPIJob{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "test",
@@ -627,7 +627,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 				Spec: kubeflow.MPIJobSpec{
 					MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 						kubeflow.MPIReplicaTypeLauncher: {
-							Replicas: pointer.Int32(1),
+							Replicas: ptr.To[int32](1),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									PriorityClassName: "high",
@@ -645,7 +645,7 @@ func TestCalculatePGMinResources(t *testing.T) {
 							},
 						},
 						kubeflow.MPIReplicaTypeWorker: {
-							Replicas: pointer.Int32(100),
+							Replicas: ptr.To[int32](100),
 							Template: corev1.PodTemplateSpec{
 								Spec: corev1.PodSpec{
 									PriorityClassName: "low",
@@ -774,15 +774,15 @@ func TestCalculateMinAvailable(t *testing.T) {
 			Spec: kubeflow.MPIJobSpec{
 				RunPolicy: kubeflow.RunPolicy{
 					SchedulingPolicy: &kubeflow.SchedulingPolicy{
-						MinAvailable: pointer.Int32(2),
+						MinAvailable: ptr.To[int32](2),
 					},
 				},
 				MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 					kubeflow.MPIReplicaTypeLauncher: {
-						Replicas: pointer.Int32(1),
+						Replicas: ptr.To[int32](1),
 					},
 					kubeflow.MPIReplicaTypeWorker: {
-						Replicas: pointer.Int32(1000),
+						Replicas: ptr.To[int32](1000),
 					},
 				},
 			},
@@ -797,10 +797,10 @@ func TestCalculateMinAvailable(t *testing.T) {
 			Spec: kubeflow.MPIJobSpec{
 				MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 					kubeflow.MPIReplicaTypeLauncher: {
-						Replicas: pointer.Int32(1),
+						Replicas: ptr.To[int32](1),
 					},
 					kubeflow.MPIReplicaTypeWorker: {
-						Replicas: pointer.Int32(99),
+						Replicas: ptr.To[int32](99),
 					},
 				},
 			},
diff --git a/test/e2e/mpi_job_test.go b/test/e2e/mpi_job_test.go
index 878fff8b3..c8919d944 100644
--- a/test/e2e/mpi_job_test.go
+++ b/test/e2e/mpi_job_test.go
@@ -29,7 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 
 	kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1"
@@ -109,7 +109,7 @@ var _ = ginkgo.Describe("MPIJob", func() {
 	ginkgo.When("suspended on creation", func() {
 		ginkgo.BeforeEach(func() {
-			mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+			mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 		})
 
 		ginkgo.It("should not create pods when suspended and succeed when resumed", func() {
 			ctx := context.Background()
@@ -494,7 +494,7 @@ var _ = ginkgo.Describe("MPIJob", func() {
 })
 
 func resumeJob(ctx context.Context, mpiJob *kubeflow.MPIJob) *kubeflow.MPIJob {
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 	ginkgo.By("Resuming MPIJob")
 	mpiJob, err := mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Update(ctx, mpiJob, metav1.UpdateOptions{})
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
diff --git a/test/integration/mpi_job_controller_test.go b/test/integration/mpi_job_controller_test.go
index 87c7eaf6c..4b20402f5 100644
--- a/test/integration/mpi_job_controller_test.go
+++ b/test/integration/mpi_job_controller_test.go
@@ -32,7 +32,7 @@ import (
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/reference"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
 	volcanov1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"
@@ -321,7 +321,7 @@ func TestMPIJobResumingAndSuspending(t *testing.T) {
 			SlotsPerWorker: newInt32(1),
 			RunPolicy: kubeflow.RunPolicy{
 				CleanPodPolicy: kubeflow.NewCleanPodPolicy(kubeflow.CleanPodPolicyRunning),
-				Suspend: pointer.Bool(true),
+				Suspend: ptr.To(true),
 			},
 			MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
 				kubeflow.MPIReplicaTypeLauncher: {
@@ -389,7 +389,7 @@ func TestMPIJobResumingAndSuspending(t *testing.T) {
 	s.events.verify(t)
 
 	// 2. Resume the MPIJob
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 	mpiJob, err = s.mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Update(ctx, mpiJob, metav1.UpdateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to resume the MPIJob: %v", err)
@@ -445,7 +445,7 @@ func TestMPIJobResumingAndSuspending(t *testing.T) {
 	s.events.verify(t)
 
 	// 4. Suspend the running MPIJob
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	mpiJob, err = s.mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Update(ctx, mpiJob, metav1.UpdateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to suspend the MPIJob: %v", err)
@@ -614,7 +614,7 @@ func TestMPIJobWithSchedulerPlugins(t *testing.T) {
 			RunPolicy: kubeflow.RunPolicy{
 				CleanPodPolicy: kubeflow.NewCleanPodPolicy(kubeflow.CleanPodPolicyRunning),
 				SchedulingPolicy: &kubeflow.SchedulingPolicy{
-					ScheduleTimeoutSeconds: pointer.Int32(900),
+					ScheduleTimeoutSeconds: ptr.To[int32](900),
 				},
 			},
 			MPIReplicaSpecs: map[kubeflow.MPIReplicaType]*kubeflow.ReplicaSpec{
@@ -1166,7 +1166,7 @@ func mpiJobHasConditionWithStatus(job *kubeflow.MPIJob, cond kubeflow.JobConditi
 }
 
 func isJobSuspended(job *batchv1.Job) bool {
-	return pointer.BoolDeref(job.Spec.Suspend, false)
+	return ptr.Deref(job.Spec.Suspend, false)
 }
 
 func newInt32(v int32) *int32 {
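
Closing note (an illustrative sketch, not part of the patch): the nil-defaulting behaviour of ptr.Deref is what the suspend helpers above rely on — an unset Suspend field reads as "not suspended", exactly as pointer.BoolDeref did. Assuming only k8s.io/api/batch/v1 and k8s.io/utils/ptr:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

// isJobSuspended mirrors the helper in the controller and integration test:
// a nil Suspend pointer is treated as "not suspended".
func isJobSuspended(job *batchv1.Job) bool {
	return ptr.Deref(job.Spec.Suspend, false)
}

func main() {
	job := &batchv1.Job{}
	fmt.Println(isJobSuspended(job)) // false: Suspend is nil by default

	job.Spec.Suspend = ptr.To(true)
	fmt.Println(isJobSuspended(job)) // true after suspending
}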