Fix the 'printf: non-constant format string in call to fmt.Errorf (govet)' lint errors #666

Merged · 1 commit · Oct 16, 2024
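
For context, the govet printf check (run here by golangci-lint v1.61.0, per the workflow change below) rejects `fmt.Errorf` calls whose format argument is not a constant string: any stray `%` in a pre-built message would be parsed as a formatting verb. Below is a minimal sketch of the flagged pattern and the replacement used throughout this PR; the function names and message text are illustrative, not the controller's actual code.

```go
package main

import (
	"errors"
	"fmt"
)

// resourceExistsBefore mirrors the flagged pattern: a message built with
// fmt.Sprintf is passed to fmt.Errorf as a non-constant format string.
// govet reports: printf: non-constant format string in call to fmt.Errorf.
func resourceExistsBefore(name, kind string) error {
	msg := fmt.Sprintf("resource %q of kind %s already exists", name, kind)
	return fmt.Errorf(msg) // any '%' inside msg is re-interpreted as a verb
}

// resourceExistsAfter is the shape this PR switches to: errors.New treats
// the pre-formatted message as plain text, so nothing is re-interpreted.
func resourceExistsAfter(name, kind string) error {
	msg := fmt.Sprintf("resource %q of kind %s already exists", name, kind)
	return errors.New(msg)
}

func main() {
	// With a '%' in the input, the "before" variant mangles the message,
	// while the "after" variant preserves it verbatim.
	fmt.Println(resourceExistsBefore("cpu-80%", "PodGroup"))
	fmt.Println(resourceExistsAfter("cpu-80%", "PodGroup"))
}
```

The accompanying CI and Makefile changes drop `only-new-issues: true` and `--new-from-rev=origin/master`, so golangci-lint now reports issues across the whole tree rather than only in newly changed lines, which is why these pre-existing call sites are fixed in the same PR.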
1 change: 0 additions & 1 deletion .github/workflows/golangci-lint.yml
@@ -20,4 +20,3 @@ jobs:
with:
version: v1.61.0
args: --timeout=5m
- only-new-issues: true
4 changes: 2 additions & 2 deletions Makefile
@@ -121,7 +121,7 @@ tidy:

.PHONY: lint
lint: bin/golangci-lint ## Run golangci-lint linter
- $(GOLANGCI_LINT) run --new-from-rev=origin/master
+ $(GOLANGCI_LINT) run

# Generate deploy/v2beta1/mpi-operator.yaml
manifest: kustomize crd
@@ -146,7 +146,7 @@ bin/envtest: bin ## Download envtest-setup locally if necessary.
@GOBIN=$(PROJECT_DIR)/bin go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

bin/kubectl: bin
- curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
+ curl -L -o $(PROJECT_DIR)/bin/kubectl https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/$(GOOS)/$(GOARCH)/kubectl
chmod +x $(PROJECT_DIR)/bin/kubectl

.PHONY: kind
41 changes: 21 additions & 20 deletions pkg/controller/mpi_job_controller.go
@@ -22,6 +22,7 @@ import (
"crypto/rand"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"reflect"
"sort"
@@ -34,7 +35,7 @@ import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -363,7 +364,7 @@ func NewMPIJobControllerWithClock(
// Pipe to default handler first, which just logs the error
cache.DefaultWatchErrorHandler(r, err)

- if errors.IsUnauthorized(err) || errors.IsForbidden(err) {
+ if apierrors.IsUnauthorized(err) || apierrors.IsForbidden(err) {
klog.Fatalf("Unable to sync cache for informer %s: %s. Requesting controller to exit.", name, err)
}
})
@@ -568,7 +569,7 @@ func (c *MPIJobController) syncHandler(key string) error {
sharedJob, err := c.mpiJobLister.MPIJobs(namespace).Get(name)
if err != nil {
// The MPIJob may no longer exist, in which case we stop processing.
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
klog.V(4).Infof("MPIJob has been deleted: %v", key)
return nil
}
@@ -718,7 +719,7 @@ func cleanUpWorkerPods(mpiJob *kubeflow.MPIJob, c *MPIJobController) error {
// getLauncherJob gets the launcher Job controlled by this MPIJob.
func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob) (*batchv1.Job, error) {
launcher, err := c.jobLister.Jobs(mpiJob.Namespace).Get(mpiJob.Name + launcherSuffix)
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
@@ -733,7 +734,7 @@ func (c *MPIJobController) getLauncherJob(mpiJob *kubeflow.MPIJob) (*batchv1.Job
if !metav1.IsControlledBy(launcher, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, launcher.Name, launcher.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return launcher, fmt.Errorf(msg)
+ return launcher, errors.New(msg)
}

return launcher, nil
@@ -744,7 +745,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
newPodGroup := c.PodGroupCtrl.newPodGroup(mpiJob)
podGroup, err := c.PodGroupCtrl.getPodGroup(newPodGroup.GetNamespace(), newPodGroup.GetName())
// If the PodGroup doesn't exist, we'll create it.
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
return c.PodGroupCtrl.createPodGroup(context.TODO(), newPodGroup)
}
// If an error occurs during Get/Create, we'll requeue the item so we
Expand All @@ -758,7 +759,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
if !metav1.IsControlledBy(podGroup, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return nil, fmt.Errorf(msg)
+ return nil, errors.New(msg)
}

if !c.PodGroupCtrl.pgSpecsAreEqual(podGroup, newPodGroup) {
@@ -771,7 +772,7 @@ func (c *MPIJobController) getOrCreatePodGroups(mpiJob *kubeflow.MPIJob) (metav1
func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
podGroup, err := c.PodGroupCtrl.getPodGroup(mpiJob.Namespace, mpiJob.Name)
if err != nil {
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
return nil
}
return err
@@ -782,7 +783,7 @@ func (c *MPIJobController) deletePodGroups(mpiJob *kubeflow.MPIJob) error {
if !metav1.IsControlledBy(podGroup, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, podGroup.GetName(), "PodGroup")
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return fmt.Errorf(msg)
+ return errors.New(msg)
}

// If the PodGroup exist, we'll delete it.
@@ -843,7 +844,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

cm, err := c.configMapLister.ConfigMaps(mpiJob.Namespace).Get(mpiJob.Name + configSuffix)
// If the ConfigMap doesn't exist, we'll create it.
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
return c.kubeClient.CoreV1().ConfigMaps(mpiJob.Namespace).Create(context.TODO(), newCM, metav1.CreateOptions{})
}
if err != nil {
@@ -855,7 +856,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev
if !metav1.IsControlledBy(cm, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, cm.Name, cm.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return nil, fmt.Errorf(msg)
+ return nil, errors.New(msg)
}

// If the ConfigMap is changed, update it
@@ -873,7 +874,7 @@ func (c *MPIJobController) getOrCreateConfigMap(mpiJob *kubeflow.MPIJob) (*corev

func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *corev1.Service) (*corev1.Service, error) {
svc, err := c.serviceLister.Services(job.Namespace).Get(newSvc.Name)
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
return c.kubeClient.CoreV1().Services(job.Namespace).Create(context.TODO(), newSvc, metav1.CreateOptions{})
}
if err != nil {
Expand All @@ -882,7 +883,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
if !metav1.IsControlledBy(svc, job) {
msg := fmt.Sprintf(MessageResourceExists, svc.Name, svc.Kind)
c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
- return nil, fmt.Errorf(msg)
+ return nil, errors.New(msg)
}

// If the Service selector is changed, update it.
Expand All @@ -899,7 +900,7 @@ func (c *MPIJobController) getOrCreateService(job *kubeflow.MPIJob, newSvc *core
// or create one if it doesn't exist.
func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*corev1.Secret, error) {
secret, err := c.secretLister.Secrets(job.Namespace).Get(job.Name + sshAuthSecretSuffix)
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
secret, err := newSSHAuthSecret(job)
if err != nil {
return nil, err
Expand All @@ -912,7 +913,7 @@ func (c *MPIJobController) getOrCreateSSHAuthSecret(job *kubeflow.MPIJob) (*core
if !metav1.IsControlledBy(secret, job) {
msg := fmt.Sprintf(MessageResourceExists, secret.Name, secret.Kind)
c.recorder.Event(job, corev1.EventTypeWarning, ErrResourceExists, msg)
- return nil, fmt.Errorf(msg)
+ return nil, errors.New(msg)
}
newSecret, err := newSSHAuthSecret(job)
if err != nil {
@@ -977,7 +978,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
pod, err := c.podLister.Pods(mpiJob.Namespace).Get(workerName(mpiJob, i))

// If the worker Pod doesn't exist, we'll create it.
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
worker := c.newWorker(mpiJob, i)
pod, err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Create(context.TODO(), worker, metav1.CreateOptions{})
}
@@ -993,7 +994,7 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return nil, fmt.Errorf(msg)
+ return nil, errors.New(msg)
}
workerPods = append(workerPods, pod)
}
@@ -1024,15 +1025,15 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
pod, err := c.podLister.Pods(mpiJob.Namespace).Get(name)

// If the worker Pod doesn't exist, no need to remove it.
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
continue
}
// If the worker is not controlled by this MPIJob resource, we should log
// a warning to the event recorder and return.
if pod != nil && !metav1.IsControlledBy(pod, mpiJob) {
msg := fmt.Sprintf(MessageResourceExists, pod.Name, pod.Kind)
c.recorder.Event(mpiJob, corev1.EventTypeWarning, ErrResourceExists, msg)
- return fmt.Errorf(msg)
+ return errors.New(msg)
}
// If the worker pod is not running and cleanupPolicy is
// set to CleanPodPolicyRunning, keep the pod.
@@ -1043,7 +1044,7 @@ func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
continue
}
err = c.kubeClient.CoreV1().Pods(mpiJob.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
- if err != nil && !errors.IsNotFound(err) {
+ if err != nil && !apierrors.IsNotFound(err) {
klog.Errorf("Failed to delete pod[%s/%s]: %v", mpiJob.Namespace, name, err)
return err
}