From e832b6218f5490e7edfa6f3bccd2dfdc4dc7240a Mon Sep 17 00:00:00 2001
From: Jonathan Innis
Date: Tue, 29 Nov 2022 10:19:17 -0800
Subject: [PATCH] Fix bugs in scheduling profiling (#88)

---
 .golangci.yaml                                |  6 ++
 .../scheduling/scheduling_benchmark_test.go   | 76 ++++++++++---------
 pkg/test/provisioner.go                       |  3 +-
 3 files changed, 48 insertions(+), 37 deletions(-)

diff --git a/.golangci.yaml b/.golangci.yaml
index a37cf2f4bf..4d93316358 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -4,6 +4,9 @@ run:
   timeout: 5m
 
+  build-tags:
+    - test_performance
+
   skip-dirs:
     - tools
     - website
@@ -62,3 +65,6 @@ issues:
     - linters:
         - goheader
       path: 'zz_(.+)\.go'
+    - linters:
+        - goheader
+      path: 'scheduling_benchmark_test.go'
diff --git a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go
index 431ba2ede6..2b18c5bac7 100644
--- a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go
+++ b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go
@@ -27,9 +27,11 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/samber/lo"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/utils/clock"
 
+	"github.com/aws/karpenter-core/pkg/apis/config/settings"
 	"github.com/aws/karpenter-core/pkg/cloudprovider"
 	"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
 	pscheduling "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
@@ -47,6 +49,7 @@ import (
 const MinPodsPerSec = 100.0
 const PrintStats = false
 
+//nolint:gosec
 var r = rand.New(rand.NewSource(42))
 
 func BenchmarkScheduling1(b *testing.B) {
@@ -81,14 +84,14 @@ func TestSchedulingProfile(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating CPU profile: %s", err)
 	}
-	pprof.StartCPUProfile(cpuf)
+	lo.Must0(pprof.StartCPUProfile(cpuf))
 	defer pprof.StopCPUProfile()
 
 	heapf, err := os.Create("schedule.heapprofile")
 	if err != nil {
 		t.Fatalf("error creating heap profile: %s", err)
 	}
-	defer pprof.WriteHeapProfile(heapf)
+	defer lo.Must0(pprof.WriteHeapProfile(heapf))
 
 	totalPods := 0
 	totalNodes := 0
@@ -112,14 +115,14 @@ func benchmarkScheduler(b *testing.B, instanceCount, podCount int) {
 	// disable logging
 	ctx := logging.WithLogger(context.Background(), zap.NewNop().Sugar())
 	ctx = settings.ToContext(ctx, test.Settings())
-	provisioner := test.Provisioner(test.ProvisionerOptions{Limits: map[v1.ResourceName]resource.Quantity{}})
+	provisioner = test.Provisioner(test.ProvisionerOptions{Limits: map[v1.ResourceName]resource.Quantity{}})
 	instanceTypes := fake.InstanceTypes(instanceCount)
-	cloudProv := fake.NewCloudProvider()
+	cloudProv = fake.NewCloudProvider()
 	cloudProv.InstanceTypes = instanceTypes
 	scheduler := pscheduling.NewScheduler(ctx, nil, []*scheduling.NodeTemplate{scheduling.NewNodeTemplate(provisioner)}, nil,
 		state.NewCluster(ctx, &clock.RealClock{}, nil, cloudProv), nil, &pscheduling.Topology{},
-		map[string][]cloudprovider.InstanceType{provisioner.Name: instanceTypes}, map[*scheduling.NodeTemplate]v1.ResourceList{},
+		map[string][]*cloudprovider.InstanceType{provisioner.Name: instanceTypes}, map[*scheduling.NodeTemplate]v1.ResourceList{},
 		test.NewEventRecorder(),
 		pscheduling.SchedulerOptions{})
@@ -196,27 +199,28 @@ func makeDiversePods(count int) []*v1.Pod {
 	return pods
 }
 
-func makePodAntiAffinityPods(count int, key string) []*v1.Pod {
-	var pods []*v1.Pod
-	for i := 0; i < count; i++ {
-		pods = append(pods, test.Pod(
-			test.PodOptions{
-				ObjectMeta:          metav1.ObjectMeta{Labels: randomAntiAffinityLabels()},
-				PodAntiRequirements: []v1.PodAffinityTerm{
-					{
-						LabelSelector: &metav1.LabelSelector{MatchLabels: randomAntiAffinityLabels()},
-						TopologyKey:   key,
-					},
-				},
-				ResourceRequirements: v1.ResourceRequirements{
-					Requests: v1.ResourceList{
-						v1.ResourceCPU:    randomCpu(),
-						v1.ResourceMemory: randomMemory(),
-					},
-				}}))
-	}
-	return pods
-}
+//func makePodAntiAffinityPods(count int, key string) []*v1.Pod {
+//	var pods []*v1.Pod
+//	for i := 0; i < count; i++ {
+//		pods = append(pods, test.Pod(
+//			test.PodOptions{
+//				ObjectMeta:          metav1.ObjectMeta{Labels: randomAntiAffinityLabels()},
+//				PodAntiRequirements: []v1.PodAffinityTerm{
+//					{
+//						LabelSelector: &metav1.LabelSelector{MatchLabels: randomAntiAffinityLabels()},
+//						TopologyKey:   key,
+//					},
+//				},
+//				ResourceRequirements: v1.ResourceRequirements{
+//					Requests: v1.ResourceList{
+//						v1.ResourceCPU:    randomCPU(),
+//						v1.ResourceMemory: randomMemory(),
+//					},
+//				}}))
+//	}
+//	return pods
+//}
+
 func makePodAffinityPods(count int, key string) []*v1.Pod {
 	var pods []*v1.Pod
 	for i := 0; i < count; i++ {
@@ -231,7 +235,7 @@ func makePodAffinityPods(count int, key string) []*v1.Pod {
 				},
 				ResourceRequirements: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						v1.ResourceCPU:    randomCpu(),
+						v1.ResourceCPU:    randomCPU(),
 						v1.ResourceMemory: randomMemory(),
 					},
 				}}))
@@ -257,7 +261,7 @@ func makeTopologySpreadPods(count int, key string) []*v1.Pod {
 				},
 				ResourceRequirements: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						v1.ResourceCPU:    randomCpu(),
+						v1.ResourceCPU:    randomCPU(),
 						v1.ResourceMemory: randomMemory(),
 					},
 				}}))
@@ -273,7 +277,7 @@ func makeGenericPods(count int) []*v1.Pod {
 				ObjectMeta: metav1.ObjectMeta{Labels: randomLabels()},
 				ResourceRequirements: v1.ResourceRequirements{
 					Requests: v1.ResourceList{
-						v1.ResourceCPU:    randomCpu(),
+						v1.ResourceCPU:    randomCPU(),
 						v1.ResourceMemory: randomMemory(),
 					},
 				}}))
@@ -286,11 +290,13 @@ func randomAffinityLabels() map[string]string {
 		"my-affininity": randomLabelValue(),
 	}
 }
-func randomAntiAffinityLabels() map[string]string {
-	return map[string]string{
-		"my-anti-affininity": randomLabelValue(),
-	}
-}
+
+//func randomAntiAffinityLabels() map[string]string {
+//	return map[string]string{
+//		"my-anti-affininity": randomLabelValue(),
+//	}
+//}
+
 func randomLabels() map[string]string {
 	return map[string]string{
 		"my-label": randomLabelValue(),
@@ -307,7 +313,7 @@ func randomMemory() resource.Quantity {
 	return resource.MustParse(fmt.Sprintf("%dMi", mem[r.Intn(len(mem))]))
 }
 
-func randomCpu() resource.Quantity {
+func randomCPU() resource.Quantity {
 	cpu := []int{100, 250, 500, 1000, 1500}
 	return resource.MustParse(fmt.Sprintf("%dm", cpu[r.Intn(len(cpu))]))
 }
diff --git a/pkg/test/provisioner.go b/pkg/test/provisioner.go
index 2a44072d2b..16e3a3fff2 100644
--- a/pkg/test/provisioner.go
+++ b/pkg/test/provisioner.go
@@ -19,7 +19,6 @@ import (
 	"fmt"
 
 	"github.com/imdario/mergo"
-	. "github.com/onsi/gomega" //nolint:revive,stylecheck
 	"github.com/samber/lo"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -64,7 +63,7 @@ func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner {
 		options.Limits = v1.ResourceList{v1.ResourceCPU: resource.MustParse("2000")}
 	}
 	raw := &runtime.RawExtension{}
-	ExpectWithOffset(1, raw.UnmarshalJSON(lo.Must(json.Marshal(options.Provider)))).To(Succeed())
+	lo.Must0(raw.UnmarshalJSON(lo.Must(json.Marshal(options.Provider))))
 	provisioner := &v1alpha5.Provisioner{
 		ObjectMeta: ObjectMeta(options.ObjectMeta),