diff --git a/cmd/root.go b/cmd/root.go
index b286736..a84db94 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -104,6 +104,11 @@ func init() {
 	if err != nil {
 		klog.Exitf("Failed to bind containers flag: %v", err)
 	}
+	findCmd.Flags().Bool("helm", false, "Show old helm chart versions. You can combine this flag with --containers to have both output in a single run.")
+	err = viper.BindPFlag("helm", findCmd.Flags().Lookup("helm"))
+	if err != nil {
+		klog.Exitf("Failed to bind helm flag: %v", err)
+	}
 	findCmd.Flags().Bool("show-non-semver", false, "When finding container images, show all containers even if they don't follow semver.")
 	err = viper.BindPFlag("show-non-semver", findCmd.Flags().Lookup("show-non-semver"))
 	if err != nil {
@@ -226,94 +231,44 @@ var findCmd = &cobra.Command{
 			klog.Exitf("--format flag value is not valid. Run `nova find --help` to see flag options")
 		}
-		if viper.GetBool("containers") {
-			// Set up a context we can use to cancel all operations to external container registries if we need to
-			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-			signals := make(chan os.Signal, 1)
-			signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
-			defer func() {
-				signal.Stop(signals)
-				cancel()
-			}()
-			go func() {
-				select {
-				case <-signals:
-					fmt.Print("\nCancelling operations to external container registries\n")
-					cancel()
-				case <-ctx.Done():
-				}
-			}()
-			showNonSemver := viper.GetBool("show-non-semver")
-			showErrored := viper.GetBool("show-errored-containers")
-			includeAll := viper.GetBool("include-all")
-			iClient := containers.NewClient(kubeContext)
-			containers, err := iClient.Find(ctx)
+		if viper.GetBool("helm") && viper.GetBool("containers") {
+			output, err := handleHelmAndContainers(kubeContext)
 			if err != nil {
-				klog.Exitf("ERROR during images.Find() %v", err)
+				klog.Exit(err)
 			}
-			out := output.NewContainersOutput(containers.Images, containers.ErrImages, showNonSemver, showErrored, includeAll)
-			out.Print(format)
-			return
-		}
-
-		h := nova_helm.NewHelm(kubeContext)
-		ahClient, err := nova_helm.NewArtifactHubPackageClient(version)
-		if err != nil {
-			klog.Exitf("error setting up artifact hub client: %s", err)
-		}
-
-		if viper.IsSet("desired-versions") {
-			klog.V(3).Infof("desired-versions is set - attempting to load them")
-			klog.V(8).Infof("raw desired-versions: %v", viper.Get("desired-versions"))
-
-			desiredVersion := viper.GetStringMapString("desired-versions")
-			for k, v := range desiredVersion {
-				klog.V(2).Infof("version override for %s: %s", k, v)
-				h.DesiredVersions = append(h.DesiredVersions, nova_helm.DesiredVersion{
-					Name:    k,
-					Version: v,
-				})
+			outputFile := viper.GetString("output-file")
+			if outputFile != "" {
+				err = output.ToFile(outputFile)
+				if err != nil {
+					klog.Exitf("error outputting to file: %s", err)
+				}
+			} else {
+				output.Print(format, viper.GetBool("wide"), viper.GetBool("show-old"))
 			}
+			return
 		}
-		releases, chartNames, err := h.GetReleaseOutput()
-		if err != nil {
-			klog.Exitf("error getting helm releases: %s", err)
-		}
-		out := output.NewOutputWithHelmReleases(releases)
-		out.IncludeAll = viper.GetBool("include-all")
-
-		if viper.GetBool("poll-artifacthub") {
-			packageRepos, err := ahClient.MultiSearch(chartNames)
+		if viper.GetBool("containers") {
+			output, err := handleContainers(kubeContext)
 			if err != nil {
-				klog.Exitf("Error getting artifacthub package repos: %v", err)
-			}
-			packages := ahClient.GetPackages(packageRepos)
-			klog.V(2).Infof("found %d possible package matches", len(packages))
-			for _, release := range
releases { - o := nova_helm.FindBestArtifactHubMatch(release, packages) - if o != nil { - h.OverrideDesiredVersion(o) - out.HelmReleases = append(out.HelmReleases, *o) - } + klog.Exit(err) } + output.Print(format) + return } - if len(viper.GetStringSlice("url")) > 0 { - repos := viper.GetStringSlice("url") - helmRepos := nova_helm.NewRepos(repos) - outputObjects := h.GetHelmReleasesVersion(helmRepos, releases) - out.HelmReleases = append(out.HelmReleases, outputObjects...) - if err != nil { - klog.Exitf("Error getting helm releases from cluster: %v", err) - } + + output, err := handleHelm(kubeContext) + if err != nil { + klog.Exit(err) } outputFile := viper.GetString("output-file") if outputFile != "" { - err = out.ToFile(outputFile) + err = output.ToFile(outputFile) if err != nil { klog.Exitf("error outputting to file: %s", err) } } else { - out.Print(format, viper.GetBool("wide"), viper.GetBool("show-old")) + output.Print(format, viper.GetBool("wide"), viper.GetBool("show-old")) } }, } @@ -338,3 +293,93 @@ func Execute(VERSION, COMMIT string) { klog.Exit(err) } } + +func handleContainers(kubeContext string) (*output.ContainersOutput, error) { + // Set up a context we can use to cancel all operations to external container registries if we need to + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, syscall.SIGTERM) + defer func() { + signal.Stop(signals) + cancel() + }() + go func() { + select { + case <-signals: + fmt.Print("\nCancelling operations to external container registries\n") + cancel() + case <-ctx.Done(): + } + }() + iClient := containers.NewClient(kubeContext) + containers, err := iClient.Find(ctx) + if err != nil { + return nil, fmt.Errorf("ERROR during images.Find() %w", err) + } + showNonSemver := viper.GetBool("show-non-semver") + showErrored := viper.GetBool("show-errored-containers") + includeAll := viper.GetBool("include-all") + return output.NewContainersOutput(containers.Images, containers.ErrImages, showNonSemver, showErrored, includeAll), nil +} + +func handleHelm(kubeContext string) (*output.Output, error) { + h := nova_helm.NewHelm(kubeContext) + if viper.IsSet("desired-versions") { + klog.V(3).Infof("desired-versions is set - attempting to load them") + klog.V(8).Infof("raw desired-versions: %v", viper.Get("desired-versions")) + + desiredVersion := viper.GetStringMapString("desired-versions") + for k, v := range desiredVersion { + klog.V(2).Infof("version override for %s: %s", k, v) + h.DesiredVersions = append(h.DesiredVersions, nova_helm.DesiredVersion{ + Name: k, + Version: v, + }) + } + } + releases, chartNames, err := h.GetReleaseOutput() + if err != nil { + return nil, fmt.Errorf("error getting helm releases: %s", err) + } + out := output.NewOutputWithHelmReleases(releases) + out.IncludeAll = viper.GetBool("include-all") + + if viper.GetBool("poll-artifacthub") { + ahClient, err := nova_helm.NewArtifactHubPackageClient(version) + if err != nil { + return nil, fmt.Errorf("error setting up artifact hub client: %s", err) + } + packageRepos, err := ahClient.MultiSearch(chartNames) + if err != nil { + return nil, fmt.Errorf("Error getting artifacthub package repos: %v", err) + } + packages := ahClient.GetPackages(packageRepos) + klog.V(2).Infof("found %d possible package matches", len(packages)) + for _, release := range releases { + o := nova_helm.FindBestArtifactHubMatch(release, packages) + if o != nil { + h.OverrideDesiredVersion(o) + out.HelmReleases = 
append(out.HelmReleases, *o)
+			}
+		}
+	}
+	if len(viper.GetStringSlice("url")) > 0 {
+		repos := viper.GetStringSlice("url")
+		helmRepos := nova_helm.NewRepos(repos)
+		outputObjects := h.GetHelmReleasesVersion(helmRepos, releases)
+		out.HelmReleases = append(out.HelmReleases, outputObjects...)
+	}
+	return &out, nil
+}
+
+func handleHelmAndContainers(kubeContext string) (*output.HelmAndContainersOutput, error) {
+	helmOutput, err := handleHelm(kubeContext)
+	if err != nil {
+		return nil, err
+	}
+	containersOutput, err := handleContainers(kubeContext)
+	if err != nil {
+		return nil, err
+	}
+	return output.NewHelmAndContainersOutput(*helmOutput, *containersOutput), nil
+}
diff --git a/docs/usage.md b/docs/usage.md
index 9535579..ecd9a87 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -12,7 +12,8 @@ nova find --wide
 ## Options
 ```
 Flags:
-      --containers          Show old container image versions instead of helm chart versions. There will be no helm output if this flag is set.
+      --containers          Show old container image versions. There will be no helm output unless the --helm flag is set as well.
+      --helm                Show old helm releases. You can combine this flag with `--containers` to have both output in a single run.
   -h, --help                help for find
       --show-non-semver     When finding container images, show all containers even if they don't follow semver.
@@ -134,3 +135,72 @@ Container Name                     Error
 ==============                     =====
 examplething.com/testing:v1.0.0    Get "https://examplething.com/v2/": dial tcp: lookup examplethingert.com: no such host
 =====
+
+## Helm Releases and Container Images combined output
+If you want nova to report both outdated helm releases and outdated container images in a single run, combine the `--helm` and `--containers` flags.
+
+Below is sample output for Nova when using the `--helm --containers` flags
+
+```
+$ nova --format=table find --helm --containers
+Release Name      Installed    Latest    Old      Deprecated
+============      =========    ======    ===      ==========
+cert-manager      v1.9.1       1.9.1     false    false
+insights-agent    2.0.7        2.6.8     true     false
+
+Container Name                       Current Version    Old     Latest     Latest Minor    Latest Patch
+==============                       ===============    ===     ======     ============    ============
+k8s.gcr.io/coredns/coredns           v1.8.4             true    v1.9.3     v1.9.3          v1.8.6
+k8s.gcr.io/etcd                      3.5.0-0            true    3.5.4-0    3.5.0-0         3.5.0-0
+k8s.gcr.io/kube-apiserver            v1.22.9            true    v1.25.0    v1.25.0         v1.22.13
+k8s.gcr.io/kube-controller-manager   v1.22.9            true    v1.25.0    v1.25.0         v1.22.13
+k8s.gcr.io/kube-proxy                v1.22.9            true    v1.25.0    v1.25.0         v1.22.13
+k8s.gcr.io/kube-scheduler            v1.22.9            true    v1.25.0    v1.25.0         v1.22.13
+```
+
+You can also print the output in `json` format
+
+```
+$ nova --format=json find --helm --containers | jq
+{
+  "helm": [
+    {
+      "release": "cert-manager",
+      "chartName": "cert-manager",
+      "namespace": "cert-manager",
+      "description": "A Helm chart for cert-manager",
+      "home": "https://github.com/cert-manager/cert-manager",
+      "icon": "https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png",
+      "Installed": { "version": "v1.9.1", "appVersion": "v1.9.1" },
+      "Latest": { "version": "1.9.1", "appVersion": "v1.9.1" },
+      "outdated": false,
+      "deprecated": false,
+      "helmVersion": "3",
+      "overridden": false
+    }
+  ],
+  "include_all": false,
+  "container": {
+    "container_images": [
+      {
+        "name": "k8s.gcr.io/kube-scheduler",
+        "current_version": "v1.22.9",
+        "latest_version": "v1.25.0",
+        "latest_minor_version": "v1.25.0",
+        "latest_patch_version": "v1.22.13",
+        "outdated": true,
+        "affectedWorkloads": [
+          {
+            "name": "kube-scheduler-kind-control-plane",
+            "namespace": 
"kube-system", + "kind": "Pod", + "container": "kube-scheduler" + } + ] + } + ], + "err_images": null, + "latest_string_found": false + } +} +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 10044c9..ad4fa4b 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.17 require ( github.com/Masterminds/semver/v3 v3.1.1 + github.com/fairwindsops/controller-utils v0.1.2 github.com/google/go-containerregistry v0.10.0 github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index d581026..cf30205 100644 --- a/go.sum +++ b/go.sum @@ -462,6 +462,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fairwindsops/controller-utils v0.1.2 h1:QskWhyiZAYtp4DfL8ZWrUubIQoJ6Ci7gm0hVje7DoGM= +github.com/fairwindsops/controller-utils v0.1.2/go.mod h1:4vc5Tpnak9VIE75sFg8J3iz28BGiP+omPv9QaSngzuk= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -1806,6 +1808,7 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2200,6 +2203,7 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= k8s.io/api v0.24.4 h1:I5Y645gJ8zWKawyr78lVfDQkZrAViSbeRXsPZWTxmXk= k8s.io/api v0.24.4/go.mod h1:42pVfA0NRxrtJhZQOvRSyZcJihzAdU59WBtTjYcB0/M= @@ -2210,6 +2214,7 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.4 h1:S0Ur3J/PbivTcL43EdSdPhqCqKla2NIuneNwZcTDeGQ= 
k8s.io/apimachinery v0.24.4/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= @@ -2223,6 +2228,7 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= @@ -2248,6 +2254,7 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= @@ -2255,6 +2262,7 @@ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 h1:yEQKdMCjzAOvGeiTwG4hO/hNVNtDOuUFvMUZ0OlaIzs= k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8/go.mod h1:mbJ+NSUoAhuR14N0S63bPkh8MGVSo3VYSGZtH/mfMe0= @@ -2265,6 +2273,8 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= @@ -2281,6 +2291,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= 
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= diff --git a/main.go b/main.go index 595983a..3b012b9 100644 --- a/main.go +++ b/main.go @@ -21,7 +21,7 @@ import ( var ( // version is set during build version = "development" - // comit is set during build + // commit is set during build commit = "n/a" ) diff --git a/pkg/containers/images.go b/pkg/containers/images.go index 49620d1..035f66a 100644 --- a/pkg/containers/images.go +++ b/pkg/containers/images.go @@ -16,6 +16,7 @@ package containers import ( "context" + "encoding/json" "fmt" "regexp" "sort" @@ -23,12 +24,16 @@ import ( "sync" version "github.com/Masterminds/semver/v3" + "github.com/fairwindsops/controller-utils/pkg/controller" "github.com/fairwindsops/nova/pkg/kube" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" "k8s.io/klog/v2" ) @@ -64,6 +69,15 @@ type Image struct { nonSemverTags []string repo name.Repository allTags []string + WorkLoads []Workload +} + +// Workload contains all the relevant data for the container workload +type Workload struct { + Name string + Namespace string + Kind string + Container string } // PodData represents a pod and it's images so that we can report the namespace and other information later @@ -88,33 +102,30 @@ func NewClient(kubeContext string) *Client { } // Find is the primary function for this package that returns the results of images found in the cluster and whether they are out of date or not -func (c *Client) Find(ctx context.Context) (Results, error) { - clusterImages, err := c.getContainerImages() +func (c *Client) Find(ctx context.Context) (*Results, error) { + clusterImages, err := c.getContainerImages(controller.GetAllTopControllers) if err != nil { - return Results{}, err + return nil, err } if len(clusterImages) == 0 { - return Results{}, fmt.Errorf("no container images found in cluster") + return nil, fmt.Errorf("no container images found in cluster") } - images := make([]*Image, len(clusterImages)) + images := make([]*Image, 0) errored := make([]*ErroredImage, 0) - wg := new(sync.WaitGroup) - for i, fullName := range clusterImages { - i, fullName := i, fullName - image, err := newImage(fullName) + for fullName, workloads := range clusterImages { + image, err := newImage(fullName, workloads) if err != nil { errored = append(errored, &ErroredImage{ Image: fullName, Err: err.Error(), }) - images[i] = nil continue } klog.V(8).Infof("Getting tags for %s", image.Name) wg.Add(1) - go func() { + go func(fullName string) { defer wg.Done() err := image.getTags(ctx) if err != nil { @@ -124,11 +135,11 @@ func (c *Client) Find(ctx context.Context) (Results, error) { }) return } - images[i] = image + images = append(images, image) klog.V(8).Infof("Done grabbing tags for %s", image.Name) - }() + }(fullName) } - klog.V(5).Infof("Waiting for all tag reciever goroutines to finish") + klog.V(5).Infof("Waiting 
for all tag receiver goroutines to finish") wg.Wait() for _, image := range images { if image == nil { @@ -137,42 +148,68 @@ func (c *Client) Find(ctx context.Context) (Results, error) { image.parseTags() err := image.populateNewest() if err != nil { - return Results{}, err + return nil, err } } - return Results{ + return &Results{ Images: images, ErrImages: errored, }, nil } -// getContainerImages fetches all pods and returns a slice of container images -func (c *Client) getContainerImages() ([]string, error) { - klog.V(3).Infof("Getting container images from pods") +// topControllerGetter was extract out to facilitate mocking controller.GetAllTopControllers function for testing +type topControllerGetter = func(ctx context.Context, dynamicClient dynamic.Interface, restMapper meta.RESTMapper, namespace string) ([]controller.Workload, error) - k := c.Kube.Client - pods, err := k.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) +// getContainerImages fetches all pods and returns a slice of container images +func (c *Client) getContainerImages(topControllerGetter topControllerGetter) (map[string][]Workload, error) { + klog.V(3).Infof("Getting all top controllers from cluster") + topControllers, err := topControllerGetter(context.TODO(), c.Kube.DynamicClient, c.Kube.RESTMapper, "") if err != nil { - return nil, errors.Wrap(err, "getting all pods") + return nil, err } - - imagesFound := make([]string, 0) - for _, pod := range pods.Items { - if len(pod.Spec.InitContainers) > 0 { - for _, container := range pod.Spec.InitContainers { - if container.Image != "" { - imagesFound = append(imagesFound, container.Image) + images := make(map[string][]Workload, 0) + for _, w := range topControllers { + if len(w.Pods) > 0 { + unstructuredPod := w.Pods[0] // just need to check the first pod (to avoid workload duplication) + pod, err := toV1Pod(unstructuredPod) + if err != nil { + return nil, fmt.Errorf("unable to parse Pod from unstructured object: %w", err) + } + if len(pod.Spec.InitContainers) > 0 { + for _, container := range pod.Spec.InitContainers { + if container.Image != "" { + images[container.Image] = append(images[container.Image], Workload{ + Name: w.TopController.GetName(), + Namespace: w.TopController.GetNamespace(), + Kind: w.TopController.GetKind(), + Container: container.Name, + }) + } } } - } - for _, container := range pod.Spec.Containers { - if container.Image != "" { - imagesFound = append(imagesFound, container.Image) + for _, container := range pod.Spec.Containers { + if container.Image != "" { + images[container.Image] = append(images[container.Image], Workload{ + Name: w.TopController.GetName(), + Namespace: w.TopController.GetNamespace(), + Kind: w.TopController.GetKind(), + Container: container.Name, + }) + } } } } - imagesFound = removeDuplicateStr(imagesFound) - return imagesFound, nil + return images, nil +} + +func toV1Pod(possiblePod unstructured.Unstructured) (*v1.Pod, error) { + b, err := possiblePod.MarshalJSON() + if err != nil { + return nil, err + } + var pod v1.Pod + err = json.Unmarshal(b, &pod) + return &pod, err } func removeDuplicateStr(strSlice []string) []string { @@ -187,7 +224,7 @@ func removeDuplicateStr(strSlice []string) []string { return list } -func newImage(fullImageTag string) (*Image, error) { +func newImage(fullImageTag string, workloads []Workload) (*Image, error) { klog.V(8).Infof("Creating image object for %s", fullImageTag) var ( @@ -224,7 +261,7 @@ func newImage(fullImageTag string) (*Image, error) { if err != nil { return nil, err } - 
+ image.WorkLoads = workloads return image, nil } diff --git a/pkg/containers/images_test.go b/pkg/containers/images_test.go index 21e62ac..a7bede1 100644 --- a/pkg/containers/images_test.go +++ b/pkg/containers/images_test.go @@ -16,14 +16,18 @@ package containers import ( "context" + "encoding/json" "reflect" "testing" version "github.com/Masterminds/semver/v3" + "github.com/fairwindsops/controller-utils/pkg/controller" "github.com/fairwindsops/nova/pkg/kube" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" ) const ( @@ -92,24 +96,42 @@ var ( ) func TestGetContainerImages(t *testing.T) { - setupKubeObjects(t, testClient) - defer teardownKubeObjects(t, testClient) + b, err := json.Marshal(testPodSpec) + if err != nil { + t.Error(err) + } + var obj map[string]interface{} + err = json.Unmarshal(b, &obj) + if err != nil { + t.Error(err) + } + fakeTopControllerGetter := func(ctx context.Context, dynamicClient dynamic.Interface, restMapper meta.RESTMapper, namespace string) ([]controller.Workload, error) { + return []controller.Workload{ + { + TopController: unstructured.Unstructured{Object: map[string]interface{}{"kind": "Deployment", "metadata": map[string]interface{}{"name": "name", "namespace": "my-namespace"}}}, + Pods: []unstructured.Unstructured{{Object: obj}}, + }, + }, nil + } tests := []struct { name string - want []string + want map[string][]Workload wantErr bool }{ { - name: "TestGetContainerImages", - want: []string{testInitContainerImage, testContainerImage}, + name: "TestGetContainerImages", + want: map[string][]Workload{ + "test-image:v1.0.0": {{Name: "name", Namespace: "my-namespace", Kind: "Deployment", Container: "test-container"}}, + "test-init-container-image:v1.0.0": {{Name: "name", Namespace: "my-namespace", Kind: "Deployment", Container: "test-init-container"}}, + }, wantErr: bool(false), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := testClient.getContainerImages() + got, err := testClient.getContainerImages(fakeTopControllerGetter) if (err != nil) != tt.wantErr { t.Errorf("getContainerImages() error = %v, wantErr %v", err, tt.wantErr) return @@ -148,6 +170,7 @@ func TestNewImage(t *testing.T) { tests := []struct { name string fullImageTag string + workloads []Workload want *Image wantErr bool }{ @@ -164,10 +187,35 @@ func TestNewImage(t *testing.T) { }, wantErr: false, }, + { + name: "TestNewImageWithWorkloads_Good", + fullImageTag: testContainerImage, + workloads: []Workload{{ + Name: "coredns", + Namespace: "kube-system", + Kind: "Deployment", + Container: "coredns", + }}, + want: &Image{ + Name: "test-image", + Prefix: "v", + Current: &Tag{ + Value: "1.0.0", + }, + WorkLoads: []Workload{{ + Name: "coredns", + Namespace: "kube-system", + Kind: "Deployment", + Container: "coredns", + }}, + StrictSemver: true, + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := newImage(tt.fullImageTag) + got, err := newImage(tt.fullImageTag, tt.workloads) if (err != nil) != tt.wantErr { t.Errorf("NewImage() error = %v, wantErr %v", err, tt.wantErr) return @@ -334,16 +382,14 @@ func TestPreReleaseRegex(t *testing.T) { } func setupKubeObjects(t *testing.T, c *Client) { - k := c.Kube.Client.(*fake.Clientset) - _, err := k.CoreV1().Pods(testNamespace).Create(context.TODO(), testPodSpec, metav1.CreateOptions{}) + _, err 
:= c.Kube.Client.CoreV1().Pods(testNamespace).Create(context.TODO(), testPodSpec, metav1.CreateOptions{}) if err != nil { t.Errorf("Error creating pod: %v", err) } } func teardownKubeObjects(t *testing.T, c *Client) { - k := c.Kube.Client.(*fake.Clientset) - err := k.CoreV1().Pods(testNamespace).Delete(context.TODO(), testPodName, metav1.DeleteOptions{}) + err := c.Kube.Client.CoreV1().Pods(testNamespace).Delete(context.TODO(), testPodName, metav1.DeleteOptions{}) if err != nil { t.Errorf("Error deleting pod: %v", err) } diff --git a/pkg/kube/kube.go b/pkg/kube/kube.go index ce3f199..30d7e24 100644 --- a/pkg/kube/kube.go +++ b/pkg/kube/kube.go @@ -15,21 +15,27 @@ package kube import ( - "os" "sync" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" + dfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2" // add all known auth providers _ "k8s.io/client-go/plugin/pkg/client/auth" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/config" ) // Connection holds a kubernetes.interface as the Client parameter type Connection struct { - Client kubernetes.Interface + Client kubernetes.Interface + DynamicClient dynamic.Interface + RESTMapper meta.RESTMapper } var ( @@ -40,36 +46,59 @@ var ( // GetConfigInstance returns a Kubernetes interface based on the current configuration func GetConfigInstance(context string) *Connection { once.Do(func() { - if kubeClient == nil { - kubeClient = &Connection{ - Client: getKubeClient(context), - } + kubeClient = &Connection{ + Client: getKubeClient(context), + DynamicClient: getDynamicKubeClient(context), + RESTMapper: getRESTMapper(context), } }) return kubeClient } func getKubeClient(context string) kubernetes.Interface { - var clientset *kubernetes.Clientset - kubeConf, err := config.GetConfigWithContext(context) if err != nil { - klog.Errorf("error getting config with context %s: %v", context, err) - os.Exit(1) + klog.Fatalf("error getting config with context %s: %v", context, err) } - clientset, err = kubernetes.NewForConfig(kubeConf) + clientset, err := kubernetes.NewForConfig(kubeConf) if err != nil { - klog.Errorf("error create kubernetes client: %v", err) - os.Exit(1) + klog.Fatalf("error create kubernetes client: %v", err) } return clientset } +func getDynamicKubeClient(context string) dynamic.Interface { + kubeConf, err := config.GetConfigWithContext(context) + if err != nil { + klog.Fatalf("error getting config with context %s: %v", context, err) + } + dynamicClient, err := dynamic.NewForConfig(kubeConf) + if err != nil { + klog.Fatalf("error create dynamic kubernetes client: %v", err) + } + return dynamicClient +} + +func getRESTMapper(context string) meta.RESTMapper { + kubeConf, err := config.GetConfigWithContext(context) + if err != nil { + klog.Fatalf("error getting config with context %s: %v", context, err) + } + + restMapper, err := apiutil.NewDynamicRESTMapper(kubeConf) + if err != nil { + klog.Fatalf("Error creating REST Mapper: %v", err) + } + return restMapper +} + // SetAndGetMock sets the singleton's interface to use a fake ClientSet func SetAndGetMock() *Connection { kc := Connection{ - Client: fake.NewSimpleClientset(), + Client: fake.NewSimpleClientset(), + DynamicClient: dfake.NewSimpleDynamicClient(runtime.NewScheme()), + RESTMapper: &meta.DefaultRESTMapper{}, } SetInstance(kc) return &kc diff --git a/pkg/output/output.go b/pkg/output/output.go index 4eb9e90..04097df 
100644
--- a/pkg/output/output.go
+++ b/pkg/output/output.go
@@ -52,6 +52,12 @@ type ContainersOutput struct {
 	LatestStringFound bool `json:"latest_string_found"`
 }
 
+// HelmAndContainersOutput represents the output data we need for displaying both out of date helm releases and out of date container images
+type HelmAndContainersOutput struct {
+	Helm      Output
+	Container ContainersOutput
+}
+
 // ReleaseOutput represents a release
 type ReleaseOutput struct {
 	ReleaseName string `json:"release"`
@@ -68,14 +74,23 @@ type ReleaseOutput struct {
 	Overridden bool `json:"overridden"`
 }
 
+// WorkloadOutput represents a workload
+type WorkloadOutput struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace"`
+	Kind      string `json:"kind"`
+	Container string `json:"container"`
+}
+
 // ContainerOutput represents all the data we need for a single container image
 type ContainerOutput struct {
-	Name               string `json:"name"`
-	CurrentVersion     string `json:"current_version"`
-	LatestVersion      string `json:"latest_version"`
-	LatestMinorVersion string `json:"latest_minor_version"`
-	LatestPatchVersion string `json:"latest_patch_version"`
-	IsOld              bool   `json:"outdated"`
+	Name               string           `json:"name"`
+	CurrentVersion     string           `json:"current_version"`
+	LatestVersion      string           `json:"latest_version"`
+	LatestMinorVersion string           `json:"latest_minor_version"`
+	LatestPatchVersion string           `json:"latest_patch_version"`
+	IsOld              bool             `json:"outdated"`
+	AffectedWorkloads  []WorkloadOutput `json:"affectedWorkloads"`
 }
 
 // VersionInfo contains both a chart version and an app version
@@ -96,6 +111,7 @@ func NewOutputWithHelmReleases(helmReleases []*release.Release) Output {
 		release.Home = helmRelease.Chart.Metadata.Home
 		release.Icon = helmRelease.Chart.Metadata.Icon
 		release.Installed = VersionInfo{helmRelease.Chart.Metadata.Version, helmRelease.Chart.Metadata.AppVersion}
+		release.HelmVersion = "3"
 		output.HelmReleases = append(output.HelmReleases, release)
 	}
 	return output
@@ -134,9 +150,7 @@ func (output Output) ToFile(filename string) error {
 		}
 		w.WriteAll(data)
 	default:
-		err := errors.New("File format is not supported. The supported file format are json and csv only")
-		return err
-
+		return errors.New("File format is not supported. The supported file formats are json and csv only")
 	}
 	return nil
 }
@@ -190,7 +204,7 @@ func (output Output) Print(format string, wide, showOld bool) {
 }
 
 // dedupe will remove duplicate releases from the output if both artifacthub and a custom URL to a helm repository find matches.
-// this will always overrite any found by artifacthub with the version from a custom helm repo url because those are found last and
+// this will always override any found by artifacthub with the version from a custom helm repo url because those are found last and
 // will therefore always be at the end of the output.HelmReleases array.
func (output *Output) dedupe() { var unique []ReleaseOutput @@ -210,7 +224,7 @@ func (output *Output) dedupe() { } // NewContainersOutput creates a new ContainersOutput object ready to be printed -func NewContainersOutput(containers []*containers.Image, errImages []*containers.ErroredImage, showNonSemver, showErrored, includeAll bool) ContainersOutput { +func NewContainersOutput(containers []*containers.Image, errImages []*containers.ErroredImage, showNonSemver, showErrored, includeAll bool) *ContainersOutput { var output ContainersOutput output.IncludeAll = includeAll for _, container := range containers { @@ -241,12 +255,22 @@ func NewContainersOutput(containers []*containers.Image, errImages []*containers if containerOutput.CurrentVersion == "latest" { output.LatestStringFound = true } + var affectedWorkloads = make([]WorkloadOutput, len(container.WorkLoads)) + for i, w := range container.WorkLoads { + affectedWorkloads[i] = WorkloadOutput{ + Name: w.Name, + Namespace: w.Namespace, + Kind: w.Kind, + Container: w.Container, + } + } + containerOutput.AffectedWorkloads = affectedWorkloads output.ContainerImages = append(output.ContainerImages, containerOutput) } if showErrored { output.ErrImages = errImages } - return output + return &output } // Print prints the ContainersOutput to STDOUT @@ -303,3 +327,82 @@ func (output ContainersOutput) Print(format string) { klog.Errorf("Output format is not supported. The supported formats are json and table only") } } + +// CombinedOutputFormat has both helm releases and containers info in a backwards compatible way +type CombinedOutputFormat struct { + Helm []ReleaseOutput `json:"helm"` + IncludeAll bool `json:"include_all"` + Container struct { + ContainerImages []ContainerOutput `json:"container_images"` + ErrImages []*containers.ErroredImage `json:"err_images"` + LatestStringFound bool `json:"latest_string_found"` + } `json:"container"` +} + +// NewHelmAndContainersOutput creates a new HelmAndContainersOutput object ready to be printed +func NewHelmAndContainersOutput(helm Output, container ContainersOutput) *HelmAndContainersOutput { + return &HelmAndContainersOutput{ + Helm: helm, + Container: container, + } +} + +// Print prints the HelmAndContainersOutput to STDOUT +func (output HelmAndContainersOutput) Print(format string, wide, showOld bool) { + switch format { + case TableFormat: + output.Helm.Print(format, wide, showOld) + fmt.Println("") + output.Container.Print(format) + case JSONFormat: + outputFormat := CombinedOutputFormat{ + Helm: output.Helm.HelmReleases, + Container: struct { + ContainerImages []ContainerOutput `json:"container_images"` + ErrImages []*containers.ErroredImage `json:"err_images"` + LatestStringFound bool `json:"latest_string_found"` + }{ + ContainerImages: output.Container.ContainerImages, + ErrImages: output.Container.ErrImages, + LatestStringFound: output.Container.LatestStringFound, + }, + IncludeAll: output.Helm.IncludeAll, + } + data, _ := json.Marshal(outputFormat) + fmt.Fprintln(os.Stdout, string(data)) + } +} + +// ToFile writes the output to a file +func (output HelmAndContainersOutput) ToFile(filename string) error { + output.Helm.dedupe() + extension := path.Ext(filename) + switch extension { + case ".json": + outputFormat := CombinedOutputFormat{ + Helm: output.Helm.HelmReleases, + Container: struct { + ContainerImages []ContainerOutput `json:"container_images"` + ErrImages []*containers.ErroredImage `json:"err_images"` + LatestStringFound bool `json:"latest_string_found"` + }{ + ContainerImages: 
output.Container.ContainerImages,
+				ErrImages:         output.Container.ErrImages,
+				LatestStringFound: output.Container.LatestStringFound,
+			},
+			IncludeAll: output.Helm.IncludeAll,
+		}
+		data, err := json.Marshal(outputFormat)
+		if err != nil {
+			klog.Errorf("Error marshaling json: %v", err)
+			return err
+		}
+		err = ioutil.WriteFile(filename, data, 0644)
+		if err != nil {
+			klog.Errorf("Error writing to file %s: %v", filename, err)
+			return err
+		}
+	default:
+		return errors.New("File format is not supported. The supported file format is json only")
+	}
+	return nil
+}