-
Notifications
You must be signed in to change notification settings - Fork 166
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add cascadeDelete
option to the Image
and Build
objects
#1742
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,6 +4,7 @@ import ( | |
"context" | ||
"encoding/json" | ||
"fmt" | ||
"slices" | ||
|
||
"github.com/google/go-containerregistry/pkg/authn" | ||
ggcrv1 "github.com/google/go-containerregistry/pkg/v1" | ||
|
@@ -41,6 +42,7 @@ const ( | |
Kind = "Build" | ||
k8sOSLabel = "kubernetes.io/os" | ||
ReasonCompleted = "Completed" | ||
BuildFinalizer = "builds.kpack.io/finalizer" | ||
) | ||
|
||
//go:generate counterfeiter . MetadataRetriever | ||
|
@@ -69,6 +71,11 @@ type SecretFetcher interface { | |
SecretsForSystemServiceAccount(context.Context) ([]*corev1.Secret, error) | ||
} | ||
|
||
//go:generate counterfeiter . RegistryClient | ||
type RegistryClient interface { | ||
Delete(keychain authn.Keychain, repoName string) error | ||
} | ||
|
||
func NewController( | ||
ctx context.Context, opt reconciler.Options, k8sClient k8sclient.Interface, | ||
informer buildinformers.BuildInformer, podInformer corev1Informers.PodInformer, | ||
|
@@ -78,6 +85,7 @@ func NewController( | |
attester SLSAAttester, | ||
secretFetcher SecretFetcher, | ||
featureFlags config.FeatureFlags, | ||
registryClient RegistryClient, | ||
) *controller.Impl { | ||
c := &Reconciler{ | ||
Client: opt.Client, | ||
|
@@ -91,6 +99,7 @@ func NewController( | |
Attester: attester, | ||
SecretFetcher: secretFetcher, | ||
FeatureFlags: featureFlags, | ||
RegistryClient: registryClient, | ||
} | ||
|
||
logger := opt.Logger.With( | ||
|
@@ -121,6 +130,7 @@ type Reconciler struct { | |
Attester SLSAAttester | ||
SecretFetcher SecretFetcher | ||
FeatureFlags config.FeatureFlags | ||
RegistryClient RegistryClient | ||
} | ||
|
||
func (c *Reconciler) Reconcile(ctx context.Context, key string) error { | ||
|
@@ -136,6 +146,14 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { | |
return err | ||
} | ||
|
||
if !build.DeletionTimestamp.IsZero() { | ||
return c.finalize(ctx, build) | ||
} | ||
|
||
if err := c.setFinalizer(ctx, build); err != nil { | ||
return err | ||
} | ||
|
||
build = build.DeepCopy() | ||
build.SetDefaults(ctx) | ||
|
||
|
@@ -352,6 +370,64 @@ func (c *Reconciler) conditionForPod(pod *corev1.Pod, stepsCompleted []string) c | |
} | ||
} | ||
|
||
func (c *Reconciler) finalize(ctx context.Context, build *buildapi.Build) error { | ||
if !slices.Contains(build.GetFinalizers(), BuildFinalizer) { | ||
return nil | ||
} | ||
|
||
if build.Finished() { | ||
keychain, err := c.KeychainFactory.KeychainForSecretRef(ctx, registry.SecretRef{ | ||
ServiceAccount: build.Spec.ServiceAccountName, | ||
Namespace: build.Namespace, | ||
}) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
if err := c.RegistryClient.Delete(keychain, build.Status.LatestImage); err != nil { | ||
//logger.Printf(errors.Wrapf(err, "Could not delete image %q", build.Status.LatestImage)) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How should we handle the error in that case? It seems like error logging is not supposed to happen at the reconciliation stage. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You can log and it will print to the controller logs |
||
} | ||
} | ||
|
||
mergePatch := map[string]interface{}{ | ||
"metadata": map[string]interface{}{ | ||
"finalizers": slices.DeleteFunc(build.GetFinalizers(), func(f string) bool { | ||
return f == BuildFinalizer | ||
}), | ||
"resourceVersion": build.ResourceVersion, | ||
}, | ||
} | ||
|
||
patch, err := json.Marshal(mergePatch) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
_, err = c.Client.KpackV1alpha2().Builds(build.Namespace).Patch(ctx, build.Name, types.MergePatchType, patch, metav1.PatchOptions{}) | ||
return err | ||
} | ||
|
||
func (c *Reconciler) setFinalizer(ctx context.Context, build *buildapi.Build) error { | ||
if slices.Contains(build.GetFinalizers(), BuildFinalizer) || !build.CascadeDeleteImage() { | ||
return nil | ||
} | ||
|
||
mergePatch := map[string]interface{}{ | ||
"metadata": map[string]interface{}{ | ||
"finalizers": append(build.GetFinalizers(), BuildFinalizer), | ||
"resourceVersion": build.ResourceVersion, | ||
}, | ||
} | ||
|
||
patch, err := json.Marshal(mergePatch) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
_, err = c.Client.KpackV1alpha2().Builds(build.Namespace).Patch(ctx, build.Name, types.MergePatchType, patch, metav1.PatchOptions{}) | ||
return err | ||
} | ||
|
||
func stepStates(pod *corev1.Pod) []corev1.ContainerState { | ||
states := make([]corev1.ContainerState, 0, len(buildapi.BuildSteps())) | ||
for _, s := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) { | ||
|
@@ -442,17 +518,17 @@ func (c *Reconciler) attestBuild(ctx context.Context, build *buildapi.Build, bui | |
signers[i] = s | ||
} | ||
|
||
buildId := slsa.UnsignedBuildID | ||
buildID := slsa.UnsignedBuildID | ||
if len(signers) > 0 { | ||
buildId = slsa.SignedBuildID | ||
buildID = slsa.SignedBuildID | ||
} | ||
|
||
deps, err := c.attestBuildDeps(ctx, build, pod, secrets) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to gather build deps: %v", err) | ||
} | ||
|
||
statement, err := c.Attester.AttestBuild(build, buildMetadata, pod, keychain, buildId, deps...) | ||
statement, err := c.Attester.AttestBuild(build, buildMetadata, pod, keychain, buildID, deps...) | ||
if err != nil { | ||
return "", fmt.Errorf("failed to generate statement: %v", err) | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We would also need to delete all of the additional tags as well.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I was under the assumption that a
Build
object produces exactly one image, while an Image
can produce multiple Build
's. If each Build
cleans up its own image, would there be any leftovers? Or is the assumption incorrect?There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
A build can push multiple images (see additionalTags). There are also the signed images and image attestations, but that might be too much to try to delete, and we probably don’t want to delete the attestations.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
According to docs, the
status.latestImage
field contains the image's digest. It should be enough to delete just this (there is no need to delete each tag individually): https://github.com/opencontainers/distribution-spec/blob/main/spec.md#deleting-manifests.