fix some issues reported by golint
Signed-off-by: rayoluo <[email protected]>
rayoluo committed Jun 27, 2023
1 parent ed5c215 commit 5592c8a
Showing 13 changed files with 39 additions and 36 deletions.
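
The changes below apply three common golint recommendations: a comment on an exported identifier must begin with that identifier's name, initialisms such as CPU, JSON, TTL, SLA and VGPU are written in full capitals, and an else whose if branch ends in a return is dropped with its body outdented. A minimal sketch of the target style (the package name and the classify helper are illustrative; CPUResource mirrors the rename in test/e2e/util/util.go):

package lintexample

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// CPUResource returns a ResourceList holding only a CPU request.
// golint wants the comment to start with "CPUResource" and the
// initialism written "CPU" rather than "Cpu".
func CPUResource(request string) v1.ResourceList {
	return v1.ResourceList{v1.ResourceCPU: resource.MustParse(request)}
}

// classify illustrates the "drop this else and outdent its block" hint:
// when the if branch ends in return, the else wrapper is redundant.
func classify(ok bool) string {
	if !ok {
		return "failed"
	}
	return "ok"
}
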
4 changes: 2 additions & 2 deletions pkg/scheduler/api/devices/nvidia/gpushare/device_info.go
@@ -94,7 +94,7 @@ func (gs *GPUDevices) GetIgnoredDevices() []string {
return []string{""}
}

-// AddGPUResource adds the pod to GPU pool if it is assigned
+// AddResource adds the pod to GPU pool if it is assigned
func (gs *GPUDevices) AddResource(pod *v1.Pod) {
gpuRes := getGPUMemoryOfPod(pod)
if gpuRes > 0 {
@@ -107,7 +107,7 @@ func (gs *GPUDevices) AddResource(pod *v1.Pod) {
}
}

-// SubGPUResource frees the gpu hold by the pod
+// SubResource frees the gpu hold by the pod
func (gs *GPUDevices) SubResource(pod *v1.Pod) {
gpuRes := getGPUMemoryOfPod(pod)
if gpuRes > 0 {
4 changes: 2 additions & 2 deletions pkg/scheduler/api/devices/nvidia/vgpu/device_info.go
@@ -117,7 +117,7 @@ func (gs *GPUDevices) GetIgnoredDevices() []string {
return []string{VolcanoVGPUMemory, VolcanoVGPUMemoryPercentage, VolcanoVGPUCores}
}

-// AddGPUResource adds the pod to GPU pool if it is assigned
+// AddResource adds the pod to GPU pool if it is assigned
func (gs *GPUDevices) AddResource(pod *v1.Pod) {
ids, ok := pod.Annotations[AssignedIDsAnnotations]
if !ok {
@@ -141,7 +141,7 @@ func (gs *GPUDevices) AddResource(pod *v1.Pod) {
}
}

-// SubGPUResource frees the gpu hold by the pod
+// SubResource frees the gpu hold by the pod
func (gs *GPUDevices) SubResource(pod *v1.Pod) {
ids, ok := pod.Annotations[AssignedIDsAnnotations]
if !ok {
10 changes: 5 additions & 5 deletions pkg/scheduler/api/devices/nvidia/vgpu/type.go
@@ -31,17 +31,17 @@ const (

NvidiaGPUDevice = "NVIDIA"

-// VolcanoGPUMemory extended gpu memory
+// VolcanoVGPUMemory extended gpu memory
VolcanoVGPUMemory = "volcano.sh/vgpu-memory"
-// VolcanoMemoryPercentage extends gpu memory
+// VolcanoVGPUMemoryPercentage extends gpu memory
VolcanoVGPUMemoryPercentage = "volcano.sh/vgpu-memory-percentage"
-// VolcanoVGPUcores indicates utilization percentage of vgpu
+// VolcanoVGPUCores indicates utilization percentage of vgpu
VolcanoVGPUCores = "volcano.sh/vgpu-cores"
-// VolcanoGPUNumber virtual GPU card number
+// VolcanoVGPUNumber virtual GPU card number
VolcanoVGPUNumber = "volcano.sh/vgpu-number"
// VolcanoVGPURegister virtual gpu information registered from device-plugin to scheduler
VolcanoVGPURegister = "volcano.sh/node-vgpu-register"
-// Volcanohandshake for vgpu
+// VolcanoVGPUHandshake for vgpu
VolcanoVGPUHandshake = "volcano.sh/node-vgpu-handshake"

// PredicateTime is the key of predicate time
7 changes: 4 additions & 3 deletions pkg/scheduler/api/node_info.go
@@ -25,6 +25,7 @@ import (
k8sframework "k8s.io/kubernetes/pkg/scheduler/framework"

"volcano.sh/apis/pkg/apis/scheduling/v1beta1"

"volcano.sh/volcano/pkg/scheduler/api/devices/nvidia/gpushare"
"volcano.sh/volcano/pkg/scheduler/api/devices/nvidia/vgpu"
)
@@ -213,7 +214,7 @@ func (ni *NodeInfo) Clone() *NodeInfo {
klog.V(5).Infof("imageStates is %v", res.ImageStates)

res.Others = ni.CloneOthers()
-res.ImageStates = ni.CloneImageSumary()
+res.ImageStates = ni.CloneImageSummary()
return res
}

@@ -539,8 +540,8 @@ func (ni *NodeInfo) Pods() (pods []*v1.Pod) {
return
}

-// Clone Image State
-func (ni *NodeInfo) CloneImageSumary() map[string]*k8sframework.ImageStateSummary {
+// CloneImageSummary Clone Image State
+func (ni *NodeInfo) CloneImageSummary() map[string]*k8sframework.ImageStateSummary {
nodeImageStates := make(map[string]*k8sframework.ImageStateSummary)
for imageName, summary := range ni.ImageStates {
newImageSummary := &k8sframework.ImageStateSummary{
2 changes: 1 addition & 1 deletion pkg/scheduler/framework/util.go
@@ -230,7 +230,7 @@ func GenerateNodeMapAndSlice(nodes map[string]*api.NodeInfo) map[string]*schedul
nodeInfo.SetNode(node.Node)
nodeMap[node.Name] = nodeInfo
// add imagestate into nodeinfo
-nodeMap[node.Name].ImageStates = node.CloneImageSumary()
+nodeMap[node.Name].ImageStates = node.CloneImageSummary()
}
return nodeMap
}
6 changes: 3 additions & 3 deletions pkg/scheduler/plugins/proportion/proportion_test.go
@@ -32,6 +32,7 @@ import (
"k8s.io/client-go/util/workqueue"

schedulingv1beta1 "volcano.sh/apis/pkg/apis/scheduling/v1beta1"

"volcano.sh/volcano/cmd/scheduler/app/options"
"volcano.sh/volcano/pkg/scheduler/actions/allocate"
"volcano.sh/volcano/pkg/scheduler/api"
@@ -282,10 +283,9 @@ func TestProportion(t *testing.T) {
t.Errorf("after delete vcjob pg2, queue_allocated metrics is fail,%v", metrics)
c <- false
return
-} else {
-	t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
-	c <- true
}
+t.Logf("after delete vcjob pg2, queue_allocated metrics is ok,%v", metrics)
+c <- true
}
num++
}
6 changes: 3 additions & 3 deletions pkg/webhooks/admission/pods/mutate/mutate_pod_test.go
@@ -28,9 +28,9 @@ import (
)

func TestMutatePods(t *testing.T) {
-affinityJsonStr := `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]}}}`
+affinityJSONStr := `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]}}}`
var affinity v1.Affinity
-json.Unmarshal([]byte(affinityJsonStr), &affinity)
+json.Unmarshal([]byte(affinityJSONStr), &affinity)

admissionConfigData := &webconfig.AdmissionConfiguration{
ResGroupsConfig: []webconfig.ResGroupConfig{
@@ -51,7 +51,7 @@ func TestMutatePods(t *testing.T) {
Effect: v1.TaintEffectNoSchedule,
},
},
-Affinity: affinityJsonStr,
+Affinity: affinityJSONStr,
Labels: map[string]string{
"volcano.sh/nodetype": "management",
},
8 changes: 4 additions & 4 deletions test/e2e/jobp/job_lifecycle.go
@@ -45,7 +45,7 @@ var _ = Describe("Job Life Cycle", func() {
Img: e2eutil.DefaultNginxImage,
Min: 2,
Rep: 2,
-Req: e2eutil.CpuResource("10000"),
+Req: e2eutil.CPUResource("10000"),
},
},
})
@@ -222,7 +222,7 @@ var _ = Describe("Job Life Cycle", func() {
Rep: 1,
Command: "sleep 10s",
RestartPolicy: v1.RestartPolicyNever,
-Req: e2eutil.CpuResource("1"),
+Req: e2eutil.CPUResource("1"),
},
},
})
@@ -306,8 +306,8 @@ var _ = Describe("Job Life Cycle", func() {
Rep: rep + 1,
Command: "sleep 10s",
RestartPolicy: v1.RestartPolicyNever,
-Req: e2eutil.CpuResource("1"),
-Limit: e2eutil.CpuResource("1"),
+Req: e2eutil.CPUResource("1"),
+Limit: e2eutil.CPUResource("1"),
Affinity: &v1.Affinity{NodeAffinity: nodeAffinity},
},
},
5 changes: 3 additions & 2 deletions test/e2e/jobseq/job_error_handling.go
@@ -28,6 +28,7 @@ import (

vcbatch "volcano.sh/apis/pkg/apis/batch/v1alpha1"
vcbus "volcano.sh/apis/pkg/apis/bus/v1alpha1"

jobctl "volcano.sh/volcano/pkg/controllers/job"

e2eutil "volcano.sh/volcano/test/e2e/util"
@@ -834,14 +835,14 @@ var _ = Describe("Job Error Handling", func() {
Name: "higherprioritytask",
Img: e2eutil.DefaultNginxImage,
Rep: int32(nodecount),
-Req: e2eutil.CpuResource(strconv.Itoa(int(rep)/nodecount - 1)),
+Req: e2eutil.CPUResource(strconv.Itoa(int(rep)/nodecount - 1)),
Taskpriority: e2eutil.MasterPriority,
},
{
Name: "lowerprioritytask",
Img: e2eutil.DefaultNginxImage,
Rep: int32(nodecount),
-Req: e2eutil.CpuResource(strconv.Itoa(int(rep)/nodecount - 1)),
+Req: e2eutil.CPUResource(strconv.Itoa(int(rep)/nodecount - 1)),
Taskpriority: e2eutil.MasterPriority,
},
},
12 changes: 6 additions & 6 deletions test/e2e/schedulingbase/sla.go
@@ -96,22 +96,22 @@ var _ = Describe("SLA Test", func() {
Expect(err).NotTo(HaveOccurred())

job2.Name = "j2-slow-sla"
-slowSlaJob := e2eutil.CreateJobWithPodGroup(ctx, job2, "", map[string]string{jobWaitingTime: "1h"})
-err = e2eutil.WaitTaskPhase(ctx, slowSlaJob, []v1.PodPhase{v1.PodPending}, 0)
+slowSLAJob := e2eutil.CreateJobWithPodGroup(ctx, job2, "", map[string]string{jobWaitingTime: "1h"})
+err = e2eutil.WaitTaskPhase(ctx, slowSLAJob, []v1.PodPhase{v1.PodPending}, 0)
Expect(err).NotTo(HaveOccurred())

job2.Name = "j3-fast-sla"
-fastSlaJob := e2eutil.CreateJobWithPodGroup(ctx, job2, "", map[string]string{jobWaitingTime: "30m"})
-err = e2eutil.WaitTaskPhase(ctx, fastSlaJob, []v1.PodPhase{v1.PodPending}, 0)
+fastSLAJob := e2eutil.CreateJobWithPodGroup(ctx, job2, "", map[string]string{jobWaitingTime: "30m"})
+err = e2eutil.WaitTaskPhase(ctx, fastSLAJob, []v1.PodPhase{v1.PodPending}, 0)
Expect(err).NotTo(HaveOccurred())

err = ctx.Vcclient.BatchV1alpha1().Jobs(e2eutil.Namespace(ctx, job1)).Delete(context.TODO(), job1.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())

-err = e2eutil.WaitTaskPhase(ctx, slowSlaJob, []v1.PodPhase{v1.PodPending}, 0)
+err = e2eutil.WaitTaskPhase(ctx, slowSLAJob, []v1.PodPhase{v1.PodPending}, 0)
Expect(err).NotTo(HaveOccurred())

-err = e2eutil.WaitTasksReady(ctx, fastSlaJob, int(rep))
+err = e2eutil.WaitTasksReady(ctx, fastSLAJob, int(rep))
Expect(err).NotTo(HaveOccurred())
})
})
6 changes: 3 additions & 3 deletions test/e2e/util/job.go
@@ -67,7 +67,7 @@ type JobSpec struct {
Volumes []batchv1alpha1.VolumeSpec
NodeName string
// ttl seconds after job finished
-Ttl *int32
+TTL *int32
MinSuccess *int32
// job max retry
MaxRetry int32
@@ -101,7 +101,7 @@ func CreateJobWithPodGroup(ctx *TestContext, jobSpec *JobSpec,
Policies: jobSpec.Policies,
Queue: jobSpec.Queue,
Plugins: jobSpec.Plugins,
-TTLSecondsAfterFinished: jobSpec.Ttl,
+TTLSecondsAfterFinished: jobSpec.TTL,
},
}

@@ -197,7 +197,7 @@ func CreateJobInner(ctx *TestContext, jobSpec *JobSpec) (*batchv1alpha1.Job, err
Policies: jobSpec.Policies,
Queue: jobSpec.Queue,
Plugins: jobSpec.Plugins,
-TTLSecondsAfterFinished: jobSpec.Ttl,
+TTLSecondsAfterFinished: jobSpec.TTL,
MinSuccess: jobSpec.MinSuccess,
MaxRetry: jobSpec.MaxRetry,
},
3 changes: 2 additions & 1 deletion test/e2e/util/util.go
@@ -39,6 +39,7 @@ import (
"k8s.io/client-go/kubernetes"

vcclient "volcano.sh/apis/pkg/client/clientset/versioned"

"volcano.sh/volcano/pkg/controllers/job/helpers"
schedulerapi "volcano.sh/volcano/pkg/scheduler/api"
)
@@ -80,7 +81,7 @@ const (
DefaultPytorchImage = "volcanosh/pytorch-mnist-v1beta1-9ee8fda-example:0.0.1"
)

-func CpuResource(request string) v1.ResourceList {
+func CPUResource(request string) v1.ResourceList {
return v1.ResourceList{v1.ResourceCPU: resource.MustParse(request)}
}

2 changes: 1 addition & 1 deletion test/e2e/vcctl/command.go
@@ -129,7 +129,7 @@ var _ = Describe("Job E2E Test: Test Job Command", func() {
{
Name: taskName,
Img: e2eutil.DefaultNginxImage,
-Req: e2eutil.CpuResource(fmt.Sprintf("%dm", 1000*rep)),
+Req: e2eutil.CPUResource(fmt.Sprintf("%dm", 1000*rep)),
Min: 1,
Rep: 1,
},
