diff --git a/cli/cmd/data/flame.go b/cli/cmd/data/flame.go
new file mode 100644
index 0000000..e281f14
--- /dev/null
+++ b/cli/cmd/data/flame.go
@@ -0,0 +1,9 @@
+package data
+
+import "k8s.io/cli-runtime/pkg/genericclioptions"
+
+type FlameConfig struct {
+	TargetConfig *TargetDetails
+	JobConfig    *JobDetails
+	ConfigFlags  *genericclioptions.ConfigFlags
+}
diff --git a/cli/cmd/data/job.go b/cli/cmd/data/job.go
new file mode 100644
index 0000000..44b6427
--- /dev/null
+++ b/cli/cmd/data/job.go
@@ -0,0 +1,75 @@
+package data
+
+import (
+	"fmt"
+
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+// JobDetails holds configuration options for the profiling job that is launched
+// by kubectl-flame.
+type JobDetails struct {
+	// RequestConfig configures resource requests for the job that is started.
+	RequestConfig ResourceConfig
+
+	// LimitConfig configures resource limits for the job that is started.
+	LimitConfig ResourceConfig
+}
+
+// ResourceConfig holds resource configuration for either requests or limits.
+type ResourceConfig struct {
+	CPU    string
+	Memory string
+}
+
+// ToResourceRequirements parses JobDetails into an apiv1.ResourceRequirements
+// map which can be passed to a container spec.
+func (jd *JobDetails) ToResourceRequirements() (apiv1.ResourceRequirements, error) {
+	var out apiv1.ResourceRequirements
+
+	requests, err := jd.RequestConfig.ParseResources()
+	if err != nil {
+		return out, fmt.Errorf("unable to generate container requests: %w", err)
+	}
+
+	limits, err := jd.LimitConfig.ParseResources()
+	if err != nil {
+		return out, fmt.Errorf("unable to generate container limits: %w", err)
+	}
+
+	out.Requests = requests
+	out.Limits = limits
+
+	return out, nil
+}
+
+// ParseResources parses the ResourceConfig and returns an apiv1.ResourceList
+// which can be used in an apiv1.ResourceRequirements map.
+func (rc ResourceConfig) ParseResources() (apiv1.ResourceList, error) {
+	if rc.CPU == "" && rc.Memory == "" {
+		return nil, nil
+	}
+
+	list := make(apiv1.ResourceList)
+
+	if rc.CPU != "" {
+		cpu, err := resource.ParseQuantity(rc.CPU)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse CPU value %q: %w", rc.CPU, err)
+		}
+
+		list[apiv1.ResourceCPU] = cpu
+	}
+
+	if rc.Memory != "" {
+		mem, err := resource.ParseQuantity(rc.Memory)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse memory value %q: %w", rc.Memory, err)
+		}
+
+		list[apiv1.ResourceMemory] = mem
+	}
+
+	return list, nil
+}
diff --git a/cli/cmd/data/job_test.go b/cli/cmd/data/job_test.go
new file mode 100644
index 0000000..0152ee4
--- /dev/null
+++ b/cli/cmd/data/job_test.go
@@ -0,0 +1,222 @@
+package data
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+func TestResourceConfig_ParseResources(t *testing.T) {
+	tt := []struct {
+		name       string
+		resConf    ResourceConfig
+		want       apiv1.ResourceList
+		wantErrMsg string
+	}{
+		{
+			name: "empty config yields no resource list",
+		},
+		{
+			name: "invalid CPU yields error",
+			resConf: ResourceConfig{
+				CPU: "test",
+			},
+			wantErrMsg: "unable to parse CPU value",
+		},
+		{
+			name: "invalid memory yields error",
+			resConf: ResourceConfig{
+				Memory: "test",
+			},
+			wantErrMsg: "unable to parse memory value",
+		},
+		{
+			name: "invalid CPU short-circuits",
+			resConf: ResourceConfig{
+				CPU:    "test",
+				Memory: "200Mi",
+			},
+			wantErrMsg: "unable to parse CPU value",
+		},
+		{
+			name: "only CPU is parsed correctly",
+			resConf: ResourceConfig{
+				CPU: "200m",
+			},
+			want: apiv1.ResourceList{
+				apiv1.ResourceCPU: resource.MustParse("200m"),
+			},
+		},
+		{
+			name: "only memory is parsed correctly",
+			resConf: ResourceConfig{
+				Memory: "200Mi",
+			},
+			want: apiv1.ResourceList{
+				apiv1.ResourceMemory: resource.MustParse("200Mi"),
+			},
+		},
+		{
+			name: "both CPU and memory are parsed correctly",
+			resConf: ResourceConfig{
+				CPU:    "200m",
+				Memory: "200Mi",
+			},
+			want: apiv1.ResourceList{
+				apiv1.ResourceCPU:    resource.MustParse("200m"),
+				apiv1.ResourceMemory: resource.MustParse("200Mi"),
+			},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			got, err := tc.resConf.ParseResources()
+
+			if tc.wantErrMsg != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tc.wantErrMsg)
+			} else {
+				require.NoError(t, err)
+			}
+
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
+
+func TestJobDetails_ToResourceRequirements(t *testing.T) {
+	tt := []struct {
+		name       string
+		jobDetails *JobDetails
+		want       apiv1.ResourceRequirements
+		wantErrMsg string
+	}{
+		{
+			name:       "empty resources yields empty requirements",
+			jobDetails: &JobDetails{},
+			want:       apiv1.ResourceRequirements{},
+		},
+		{
+			name: "invalid request CPU yields error",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU: "test",
+				},
+			},
+			wantErrMsg: "unable to generate container requests",
+		},
+		{
+			name: "invalid request mem yields error",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					Memory: "test",
+				},
+			},
+			wantErrMsg: "unable to generate container requests",
+		},
+		{
+			name: "valid requests yields requests only",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+			},
+			want: apiv1.ResourceRequirements{
+				Requests: apiv1.ResourceList{
+					apiv1.ResourceCPU: resource.MustParse("100m"),
+					apiv1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+			},
+		},
+		{
+			name: "valid requests & invalid cpu limits yields error",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+				LimitConfig: ResourceConfig{
+					CPU: "test",
+				},
+			},
+			wantErrMsg: "unable to generate container limits",
+		},
+		{
+			name: "valid requests & invalid memory limits yields error",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+				LimitConfig: ResourceConfig{
+					Memory: "test",
+				},
+			},
+			wantErrMsg: "unable to generate container limits",
+		},
+		{
+			name: "valid requests & limits yields both correctly",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+				LimitConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+			},
+			want: apiv1.ResourceRequirements{
+				Requests: apiv1.ResourceList{
+					apiv1.ResourceCPU:    resource.MustParse("100m"),
+					apiv1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+				Limits: apiv1.ResourceList{
+					apiv1.ResourceCPU:    resource.MustParse("100m"),
+					apiv1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+			},
+		},
+		{
+			name: "missing cpu limits yields requirements without cpu limits",
+			jobDetails: &JobDetails{
+				RequestConfig: ResourceConfig{
+					CPU:    "100m",
+					Memory: "200Mi",
+				},
+				LimitConfig: ResourceConfig{
+					Memory: "200Mi",
+				},
+			},
+			want: apiv1.ResourceRequirements{
+				Requests: apiv1.ResourceList{
+					apiv1.ResourceCPU:    resource.MustParse("100m"),
+					apiv1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+				Limits: apiv1.ResourceList{
+					apiv1.ResourceMemory: resource.MustParse("200Mi"),
+				},
+			},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			got, err := tc.jobDetails.ToResourceRequirements()
+
+			if tc.wantErrMsg != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tc.wantErrMsg)
+			} else {
+				require.NoError(t, err)
+			}
+
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
diff --git a/cli/cmd/data/target.go b/cli/cmd/data/target.go
index bad4291..10a44e7 100644
--- a/cli/cmd/data/target.go
+++ b/cli/cmd/data/target.go
@@ -3,8 +3,9 @@
 package data
 
 import (
-	"github.com/VerizonMedia/kubectl-flame/api"
 	"time"
+
+	"github.com/VerizonMedia/kubectl-flame/api"
 )
 
 type TargetDetails struct {
@@ -12,7 +13,7 @@ type TargetDetails struct {
 	PodName       string
 	ContainerName string
 	ContainerId   string
-	Event         api.ProfilingEvent 
+	Event         api.ProfilingEvent
 	Duration      time.Duration
 	Id            string
 	FileName      string
diff --git a/cli/cmd/kubernetes/job/bpf.go b/cli/cmd/kubernetes/job/bpf.go
index 8f6b087..3ebb2e7 100644
--- a/cli/cmd/kubernetes/job/bpf.go
+++ b/cli/cmd/kubernetes/job/bpf.go
@@ -16,18 +16,18 @@ import (
 type bpfCreator struct{}
 
-func (b *bpfCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDetails) (string, *batchv1.Job) {
+func (b *bpfCreator) create(targetPod *apiv1.Pod, cfg *data.FlameConfig) (string, *batchv1.Job, error) {
 	id := string(uuid.NewUUID())
 
 	var imageName string
-	if targetDetails.Image != "" {
-		imageName = targetDetails.Image
+	if cfg.TargetConfig.Image != "" {
+		imageName = cfg.TargetConfig.Image
 	} else {
fmt.Sprintf("%s:%s-bpf", baseImageName, version.GetCurrent()) } commonMeta := metav1.ObjectMeta{ Name: fmt.Sprintf("kubectl-flame-%s", id), - Namespace: targetDetails.Namespace, + Namespace: cfg.TargetConfig.Namespace, Labels: map[string]string{ "kubectl-flame/id": id, }, @@ -36,6 +36,11 @@ func (b *bpfCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta }, } + resources, err := cfg.JobConfig.ToResourceRequirements() + if err != nil { + return "", nil, fmt.Errorf("unable to generate resource requirements: %w", err) + } + job := &batchv1.Job{ TypeMeta: metav1.TypeMeta{ Kind: "Job", @@ -75,13 +80,14 @@ func (b *bpfCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta Name: ContainerName, Image: imageName, Command: []string{"/app/agent"}, - Args: []string{id, + Args: []string{ + id, string(targetPod.UID), - targetDetails.ContainerName, - targetDetails.ContainerId, - targetDetails.Duration.String(), - string(targetDetails.Language), - targetDetails.Pgrep, + cfg.TargetConfig.ContainerName, + cfg.TargetConfig.ContainerId, + cfg.TargetConfig.Duration.String(), + string(cfg.TargetConfig.Language), + cfg.TargetConfig.Pgrep, }, VolumeMounts: []apiv1.VolumeMount{ { @@ -96,6 +102,7 @@ func (b *bpfCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta SecurityContext: &apiv1.SecurityContext{ Privileged: boolPtr(true), }, + Resources: resources, }, }, RestartPolicy: "Never", @@ -105,5 +112,5 @@ func (b *bpfCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta }, } - return id, job + return id, job, nil } diff --git a/cli/cmd/kubernetes/job/jvm.go b/cli/cmd/kubernetes/job/jvm.go index fe882e1..7d34ba6 100644 --- a/cli/cmd/kubernetes/job/jvm.go +++ b/cli/cmd/kubernetes/job/jvm.go @@ -16,21 +16,23 @@ import ( type jvmCreator struct{} -func (c *jvmCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDetails) (string, *batchv1.Job) { +func (c *jvmCreator) create(targetPod *apiv1.Pod, cfg *data.FlameConfig) (string, *batchv1.Job, error) { id := string(uuid.NewUUID()) - imageName := c.getAgentImage(targetDetails) - args := []string{id, string(targetPod.UID), - targetDetails.ContainerName, targetDetails.ContainerId, - targetDetails.Duration.String(), string(targetDetails.Language), - string(targetDetails.Event)} + imageName := c.getAgentImage(cfg.TargetConfig) + args := []string{ + id, string(targetPod.UID), + cfg.TargetConfig.ContainerName, cfg.TargetConfig.ContainerId, + cfg.TargetConfig.Duration.String(), string(cfg.TargetConfig.Language), + string(cfg.TargetConfig.Event), + } - if targetDetails.Pgrep != "" { - args = append(args, targetDetails.Pgrep) + if cfg.TargetConfig.Pgrep != "" { + args = append(args, cfg.TargetConfig.Pgrep) } commonMeta := metav1.ObjectMeta{ Name: fmt.Sprintf("kubectl-flame-%s", id), - Namespace: targetDetails.Namespace, + Namespace: cfg.TargetConfig.Namespace, Labels: map[string]string{ "kubectl-flame/id": id, }, @@ -39,6 +41,11 @@ func (c *jvmCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta }, } + resources, err := cfg.JobConfig.ToResourceRequirements() + if err != nil { + return "", nil, fmt.Errorf("unable to generate resource requirements: %w", err) + } + job := &batchv1.Job{ TypeMeta: metav1.TypeMeta{ Kind: "Job", @@ -58,7 +65,7 @@ func (c *jvmCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta Name: "target-filesystem", VolumeSource: apiv1.VolumeSource{ HostPath: &apiv1.HostPathVolumeSource{ - Path: targetDetails.DockerPath, + Path: cfg.TargetConfig.DockerPath, }, }, }, 
@@ -80,6 +87,7 @@ func (c *jvmCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta SecurityContext: &apiv1.SecurityContext{ Privileged: boolPtr(true), }, + Resources: resources, }, }, RestartPolicy: "Never", @@ -89,7 +97,7 @@ func (c *jvmCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDeta }, } - return id, job + return id, job, nil } func (c *jvmCreator) getAgentImage(targetDetails *data.TargetDetails) string { diff --git a/cli/cmd/kubernetes/job/python.go b/cli/cmd/kubernetes/job/python.go index ae35bc3..c637431 100644 --- a/cli/cmd/kubernetes/job/python.go +++ b/cli/cmd/kubernetes/job/python.go @@ -2,6 +2,7 @@ package job import ( "fmt" + "github.com/VerizonMedia/kubectl-flame/cli/cmd/data" "github.com/VerizonMedia/kubectl-flame/cli/cmd/version" batchv1 "k8s.io/api/batch/v1" @@ -12,18 +13,18 @@ import ( type pythonCreator struct{} -func (p *pythonCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetDetails) (string, *batchv1.Job) { +func (p *pythonCreator) create(targetPod *apiv1.Pod, cfg *data.FlameConfig) (string, *batchv1.Job, error) { id := string(uuid.NewUUID()) var imageName string - if targetDetails.Image != "" { - imageName = targetDetails.Image + if cfg.TargetConfig.Image != "" { + imageName = cfg.TargetConfig.Image } else { imageName = fmt.Sprintf("%s:%s-python", baseImageName, version.GetCurrent()) } commonMeta := metav1.ObjectMeta{ Name: fmt.Sprintf("kubectl-flame-%s", id), - Namespace: targetDetails.Namespace, + Namespace: cfg.TargetConfig.Namespace, Labels: map[string]string{ "kubectl-flame/id": id, }, @@ -32,6 +33,11 @@ func (p *pythonCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetD }, } + resources, err := cfg.JobConfig.ToResourceRequirements() + if err != nil { + return "", nil, fmt.Errorf("unable to generate resource requirements: %w", err) + } + job := &batchv1.Job{ TypeMeta: metav1.TypeMeta{ Kind: "Job", @@ -53,13 +59,14 @@ func (p *pythonCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetD Name: ContainerName, Image: imageName, Command: []string{"/app/agent"}, - Args: []string{id, + Args: []string{ + id, string(targetPod.UID), - targetDetails.ContainerName, - targetDetails.ContainerId, - targetDetails.Duration.String(), - string(targetDetails.Language), - targetDetails.Pgrep, + cfg.TargetConfig.ContainerName, + cfg.TargetConfig.ContainerId, + cfg.TargetConfig.Duration.String(), + string(cfg.TargetConfig.Language), + cfg.TargetConfig.Pgrep, }, SecurityContext: &apiv1.SecurityContext{ Privileged: boolPtr(true), @@ -67,6 +74,7 @@ func (p *pythonCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetD Add: []apiv1.Capability{"SYS_PTRACE"}, }, }, + Resources: resources, }, }, RestartPolicy: "Never", @@ -76,5 +84,5 @@ func (p *pythonCreator) create(targetPod *apiv1.Pod, targetDetails *data.TargetD }, } - return id, job + return id, job, nil } diff --git a/cli/cmd/kubernetes/job/root.go b/cli/cmd/kubernetes/job/root.go index 119ae40..a04795b 100644 --- a/cli/cmd/kubernetes/job/root.go +++ b/cli/cmd/kubernetes/job/root.go @@ -3,6 +3,8 @@ package job import ( + "errors" + batchv1 "k8s.io/api/batch/v1" apiv1 "k8s.io/api/core/v1" @@ -22,19 +24,19 @@ var ( ) type creator interface { - create(targetPod *apiv1.Pod, targetDetails *data.TargetDetails) (string, *batchv1.Job) + create(targetPod *apiv1.Pod, cfg *data.FlameConfig) (string, *batchv1.Job, error) } -func Create(targetPod *apiv1.Pod, targetDetails *data.TargetDetails) (string, *batchv1.Job) { - switch targetDetails.Language { +func 
Create(targetPod *apiv1.Pod, cfg *data.FlameConfig) (string, *batchv1.Job, error) { + switch cfg.TargetConfig.Language { case api.Java: - return jvm.create(targetPod, targetDetails) + return jvm.create(targetPod, cfg) case api.Go: - return bpf.create(targetPod, targetDetails) + return bpf.create(targetPod, cfg) case api.Python: - return python.create(targetPod, targetDetails) + return python.create(targetPod, cfg) } // Should not happen - panic("got language without job creator") + return "", nil, errors.New("got language without job creator") } diff --git a/cli/cmd/kubernetes/launch.go b/cli/cmd/kubernetes/launch.go index b4eb98a..2158a7c 100644 --- a/cli/cmd/kubernetes/launch.go +++ b/cli/cmd/kubernetes/launch.go @@ -4,9 +4,11 @@ package kubernetes import ( "context" - "github.com/VerizonMedia/kubectl-flame/cli/cmd/kubernetes/job" + "fmt" "os" + "github.com/VerizonMedia/kubectl-flame/cli/cmd/kubernetes/job" + "github.com/VerizonMedia/kubectl-flame/cli/cmd/data" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" @@ -14,19 +16,21 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/json" ) -func LaunchFlameJob(targetPod *v1.Pod, targetDetails *data.TargetDetails, ctx context.Context) (string, *batchv1.Job, error) { - id, flameJob := job.Create(targetPod, targetDetails) +func LaunchFlameJob(targetPod *v1.Pod, cfg *data.FlameConfig, ctx context.Context) (string, *batchv1.Job, error) { + id, flameJob, err := job.Create(targetPod, cfg) + if err != nil { + return "", nil, fmt.Errorf("unable to create job: %w", err) + } - if targetDetails.DryRun { + if cfg.TargetConfig.DryRun { err := printJob(flameJob) return "", nil, err } createJob, err := clientSet. BatchV1(). - Jobs(targetDetails.Namespace). + Jobs(cfg.TargetConfig.Namespace). Create(ctx, flameJob, metav1.CreateOptions{}) - if err != nil { return "", nil, err } diff --git a/cli/cmd/logic.go b/cli/cmd/logic.go index 5c8b215..9ab8a24 100644 --- a/cli/cmd/logic.go +++ b/cli/cmd/logic.go @@ -12,26 +12,27 @@ import ( "github.com/VerizonMedia/kubectl-flame/cli/cmd/handler" "github.com/VerizonMedia/kubectl-flame/cli/cmd/kubernetes" v1 "k8s.io/api/core/v1" - "k8s.io/cli-runtime/pkg/genericclioptions" ) -func Flame(target *data.TargetDetails, configFlags *genericclioptions.ConfigFlags) { - ns, err := kubernetes.Connect(configFlags) - p := NewPrinter(target.DryRun) +func Flame(cfg *data.FlameConfig) { + ns, err := kubernetes.Connect(cfg.ConfigFlags) if err != nil { log.Fatalf("Failed connecting to kubernetes cluster: %v\n", err) } - target.Namespace = ns + p := NewPrinter(cfg.TargetConfig.DryRun) + + cfg.TargetConfig.Namespace = ns ctx := context.Background() + p.Print("Verifying target pod ... ") - pod, err := kubernetes.GetPodDetails(target.PodName, target.Namespace, ctx) + pod, err := kubernetes.GetPodDetails(cfg.TargetConfig.PodName, cfg.TargetConfig.Namespace, ctx) if err != nil { p.PrintError() log.Fatalf(err.Error()) } - containerName, err := validatePod(pod, target) + containerName, err := validatePod(pod, cfg.TargetConfig) if err != nil { p.PrintError() log.Fatalf(err.Error()) @@ -44,21 +45,23 @@ func Flame(target *data.TargetDetails, configFlags *genericclioptions.ConfigFlag } p.PrintSuccess() - target.ContainerName = containerName - target.ContainerId = containerId + + cfg.TargetConfig.ContainerName = containerName + cfg.TargetConfig.ContainerId = containerId + p.Print("Launching profiler ... 
") - profileId, job, err := kubernetes.LaunchFlameJob(pod, target, ctx) + profileId, job, err := kubernetes.LaunchFlameJob(pod, cfg, ctx) if err != nil { p.PrintError() log.Fatalf(err.Error()) } - if target.DryRun { + if cfg.TargetConfig.DryRun { return } - target.Id = profileId - profilerPod, err := kubernetes.WaitForPodStart(target, ctx) + cfg.TargetConfig.Id = profileId + profilerPod, err := kubernetes.WaitForPodStart(cfg.TargetConfig, ctx) if err != nil { p.PrintError() log.Fatalf(err.Error()) @@ -67,7 +70,7 @@ func Flame(target *data.TargetDetails, configFlags *genericclioptions.ConfigFlag p.PrintSuccess() apiHandler := &handler.ApiEventsHandler{ Job: job, - Target: target, + Target: cfg.TargetConfig, } done, err := kubernetes.GetLogsFromPod(profilerPod, apiHandler, ctx) if err != nil { diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 13b5227..4263b79 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -4,13 +4,14 @@ package cmd import ( "fmt" + "os" + "time" + "github.com/VerizonMedia/kubectl-flame/api" "github.com/VerizonMedia/kubectl-flame/cli/cmd/data" "github.com/VerizonMedia/kubectl-flame/cli/cmd/version" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" - "os" - "time" ) const ( @@ -29,6 +30,9 @@ These commands help you identify application performance issues. # Profile specific container container1 from pod mypod in namespace test %[1]s flame mypod -f /tmp/flame.svg -n test container1 + + # Set custom resource requests and limits for the kubectl-flame pod (default: neither requests nor limits are set) + %[1]s flame mypod -f flame.svg -cpu.requests 100m -cpu.limits 200m -mem.requests 100Mi -mem.limits 200Mi ` ) @@ -45,10 +49,13 @@ func NewFlameOptions(streams genericclioptions.IOStreams) *FlameOptions { } func NewFlameCommand(streams genericclioptions.IOStreams) *cobra.Command { - var targetDetails data.TargetDetails - var showVersion bool - var chosenLang string - var chosenEvent string + var ( + targetDetails data.TargetDetails + jobDetails data.JobDetails + showVersion bool + chosenLang string + chosenEvent string + ) options := NewFlameOptions(streams) cmd := &cobra.Command{ @@ -71,7 +78,7 @@ func NewFlameCommand(streams genericclioptions.IOStreams) *cobra.Command { return } - if err := validateFlags(chosenLang, chosenEvent, &targetDetails); err != nil { + if err := validateFlags(chosenLang, chosenEvent, &targetDetails, &jobDetails); err != nil { fmt.Fprintln(streams.Out, err) os.Exit(1) } @@ -81,11 +88,18 @@ func NewFlameCommand(streams genericclioptions.IOStreams) *cobra.Command { targetDetails.ContainerName = args[1] } - Flame(&targetDetails, options.configFlags) + cfg := &data.FlameConfig{ + TargetConfig: &targetDetails, + JobConfig: &jobDetails, + ConfigFlags: options.configFlags, + } + + Flame(cfg) }, } cmd.Flags().BoolVar(&showVersion, "version", false, "Print version info") + cmd.Flags().DurationVarP(&targetDetails.Duration, "time", "t", defaultDuration, "Max scan Duration") cmd.Flags().StringVarP(&targetDetails.FileName, "file", "f", "flamegraph.svg", "Optional file location") cmd.Flags().BoolVar(&targetDetails.Alpine, "alpine", false, "Target image is based on Alpine") @@ -93,16 +107,23 @@ func NewFlameCommand(streams genericclioptions.IOStreams) *cobra.Command { cmd.Flags().StringVar(&targetDetails.Image, "image", "", "Manually choose agent docker image") cmd.Flags().StringVar(&targetDetails.DockerPath, "docker-path", "/var/lib/docker/", "Use a different Docker install path") cmd.Flags().StringVarP(&targetDetails.Pgrep, "pgrep", "p", "", 
"name of the target process") + cmd.Flags().StringVarP(&chosenLang, "lang", "l", "", fmt.Sprintf("Programming language of "+ "the target application, choose one of %v", api.AvailableLanguages())) cmd.Flags().StringVarP(&chosenEvent, "event", "e", defaultEvent, fmt.Sprintf("Profiling event, choose one of %v", api.AvailableEvents())) + + cmd.Flags().StringVar(&jobDetails.RequestConfig.CPU, "cpu.requests", "", "CPU requests of the started profiling container") + cmd.Flags().StringVar(&jobDetails.RequestConfig.Memory, "mem.requests", "", "Memory requests of the started profiling container") + cmd.Flags().StringVar(&jobDetails.LimitConfig.CPU, "cpu.limits", "", "CPU limits of the started profiling container") + cmd.Flags().StringVar(&jobDetails.LimitConfig.Memory, "mem.limits", "", "Memory limits of the started profiling container") + options.configFlags.AddFlags(cmd.Flags()) return cmd } -func validateFlags(langString string, eventString string, details *data.TargetDetails) error { +func validateFlags(langString string, eventString string, targetDetails *data.TargetDetails, jobDetails *data.JobDetails) error { if langString == "" { return fmt.Errorf("use -l flag to select one of the supported languages %s", api.AvailableLanguages()) } @@ -115,7 +136,16 @@ func validateFlags(langString string, eventString string, details *data.TargetDe return fmt.Errorf("unsupported event, choose one of %s", api.AvailableEvents()) } - details.Language = api.ProgrammingLanguage(langString) - details.Event = api.ProfilingEvent(eventString) + targetDetails.Language = api.ProgrammingLanguage(langString) + targetDetails.Event = api.ProfilingEvent(eventString) + + if _, err := jobDetails.RequestConfig.ParseResources(); err != nil { + return fmt.Errorf("unable to parse resource requests: %w", err) + } + + if _, err := jobDetails.LimitConfig.ParseResources(); err != nil { + return fmt.Errorf("unable to parse resourse limits: %w", err) + } + return nil } diff --git a/go.mod b/go.mod index adbc468..574e57b 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.0 + github.com/stretchr/testify v1.4.0 k8s.io/api v0.18.6 k8s.io/apimachinery v0.18.6 k8s.io/cli-runtime v0.18.6