This repository has been archived by the owner on Apr 4, 2024. It is now read-only.

Commit

update knative serving to 0.39.3, removing patch (#211)
update knative serving to 0.39.3

Signed-off-by: Kenny Leung <[email protected]>
k4leung4 authored Dec 19, 2023
1 parent ed7c931 commit 19c9049
Showing 7 changed files with 18 additions and 26 deletions.
go.mod: 2 changes (1 addition, 1 deletion)
@@ -15,7 +15,7 @@ require (
 	knative.dev/net-istio v0.39.0
 	knative.dev/networking v0.0.0-20231017124814-2a7676e912b7
 	knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c
-	knative.dev/serving v0.39.0
+	knative.dev/serving v0.39.3
 )

go.sum: 4 changes (2 additions, 2 deletions)
@@ -877,8 +877,8 @@ knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 h1:6+1icZuxiZO1paFZ4d/
 knative.dev/networking v0.0.0-20231017124814-2a7676e912b7/go.mod h1:1gcHoIVG47ekQWjkddqRq+/7tWRh+CB9W4k/NAcdRbk=
 knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c h1:xyPoEToTWeBdn6tinhLxXfnhJhTNQt5WzHiTNiFphRw=
 knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c/go.mod h1:HHRXEd7ZlFpthgE+rwAZ6MUVnuJOAeolnaFSthXloUQ=
-knative.dev/serving v0.39.0 h1:NVt8WthHmFFMWZ3qpBblXt47del8qqrbCegqwGBVSwk=
-knative.dev/serving v0.39.0/go.mod h1:0QIp5mvgWa1oUC2MxMf+Q/JWgG8JhAsSdJKc6iTRlvE=
+knative.dev/serving v0.39.3 h1:x3p3iCY0eKwKZmlXUZfc9C0YawyiB6Kc1HlE66b530I=
+knative.dev/serving v0.39.3/go.mod h1:bWylSgwnRZeL659qy7m3/TZioYk25TIfusPUEeR695A=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
hack/patches/14592.patch: 21 changes (0 additions, 21 deletions)

This file was deleted.

@@ -19,6 +19,7 @@ package v1
 import (
 	"time"
 
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	net "knative.dev/networking/pkg/apis/networking"
 	"knative.dev/pkg/kmeta"
@@ -143,3 +144,9 @@ func (rs *RevisionStatus) IsActivationRequired() bool {
 	c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive)
 	return c != nil && c.Status != corev1.ConditionTrue
 }
+
+// IsReplicaSetFailure returns true if the deployment replicaset failed to create
+func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool {
+	ds := serving.TransformDeploymentStatus(deploymentStatus)
+	return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse()
+}
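
The new helper delegates to serving.TransformDeploymentStatus, so the diff alone does not show what counts as a "replicaset failure". At the Kubernetes level it boils down to the Deployment's ReplicaFailure condition; here is a minimal standalone sketch of that signal using only k8s.io/api types (hasReplicaSetFailure is an illustrative name, not the serving implementation):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// hasReplicaSetFailure is an illustrative stand-in for the helper above:
// a Deployment reports a ReplicaFailure condition when its ReplicaSet
// cannot create pods (quota exhaustion, admission rejection, and so on).
func hasReplicaSetFailure(ds *appsv1.DeploymentStatus) bool {
	for _, c := range ds.Conditions {
		if c.Type == appsv1.DeploymentReplicaFailure && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	ds := &appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:    appsv1.DeploymentReplicaFailure,
			Status:  corev1.ConditionTrue,
			Reason:  "FailedCreate",
			Message: "exceeded quota",
		}},
	}
	fmt.Println(hasReplicaSetFailure(ds)) // true
}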
@@ -51,6 +51,7 @@ const (
 	ReasonProgressDeadlineExceeded = "ProgressDeadlineExceeded"
 )
 
+// RevisionConditionActive is not part of the RevisionConditionSet because we can have Inactive Ready Revisions (scale to zero)
 var revisionCondSet = apis.NewLivingConditionSet(
 	RevisionConditionResourcesAvailable,
 	RevisionConditionContainerHealthy,
@@ -171,7 +172,6 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS
 func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) {
 	// Reflect the PA status in our own.
 	cond := ps.GetCondition(autoscalingv1alpha1.PodAutoscalerConditionReady)
-
 	rs.ActualReplicas = nil
 	if ps.ActualScale != nil && *ps.ActualScale >= 0 {
 		rs.ActualReplicas = ps.ActualScale
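
The comment added above is the key design note: RevisionConditionActive stays out of the living condition set, so an inactive (scaled-to-zero) Revision can still be Ready. A small sketch of how living condition sets from knative.dev/pkg/apis behave, with placeholder condition names rather than the real Revision conditions wired up here:

package main

import (
	"fmt"

	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"
)

// Placeholder dependent conditions; only conditions listed here feed into
// Ready, so a condition managed outside the set (like Active above) can be
// False while the resource stays Ready.
var condSet = apis.NewLivingConditionSet(
	"ResourcesAvailable",
	"ContainerHealthy",
)

func main() {
	status := &duckv1.Status{}
	m := condSet.Manage(status)
	m.InitializeConditions() // Ready and dependents start Unknown
	m.MarkTrue("ResourcesAvailable")
	m.MarkTrue("ContainerHealthy")
	fmt.Println(m.GetCondition(apis.ConditionReady).Status) // True
}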
@@ -79,6 +79,12 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
 		}
 	}
 
+	// If the replicaset is failing we assume its an error we have to surface
+	if rev.Status.IsReplicaSetFailure(&deployment.Status) {
+		rev.Status.PropagateDeploymentStatus(&deployment.Status)
+		return nil
+	}
+
 	// If a container keeps crashing (no active pods in the deployment although we want some)
 	if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 {
 		pods, err := c.kubeclient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector)})
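
The new block returns early, so replicaset-level failures (quota, admission policy) are surfaced before the crash-loop probe that follows. That probe, truncated by the diff, lists the deployment's pods by selector and inspects container states; a hedged sketch of its shape, where findCrashingPod and the client wiring are illustrative rather than serving's actual code:

package sketch

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// findCrashingPod lists the pods selected by the deployment and returns the
// first one with a container stuck in a waiting state, mirroring the shape of
// the crash-loop branch above (hypothetical helper, not serving's code).
func findCrashingPod(ctx context.Context, kube kubernetes.Interface, dep *appsv1.Deployment) (*corev1.Pod, error) {
	pods, err := kube.CoreV1().Pods(dep.Namespace).List(ctx, metav1.ListOptions{
		LabelSelector: metav1.FormatLabelSelector(dep.Spec.Selector),
	})
	if err != nil {
		return nil, err
	}
	for i := range pods.Items {
		for _, cs := range pods.Items[i].Status.ContainerStatuses {
			if cs.State.Waiting != nil {
				return &pods.Items[i], nil
			}
		}
	}
	return nil, nil
}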
vendor/modules.txt: 2 changes (1 addition, 1 deletion)
@@ -1335,7 +1335,7 @@ knative.dev/pkg/webhook/resourcesemantics/defaulting
 knative.dev/pkg/webhook/resourcesemantics/validation
 knative.dev/pkg/webhook/testing
 knative.dev/pkg/websocket
-# knative.dev/serving v0.39.0
+# knative.dev/serving v0.39.3
 ## explicit; go 1.18
 knative.dev/serving/cmd/activator
 knative.dev/serving/cmd/autoscaler
