From 074f5bc6ff140de0b0ed7720bb031ae325f3db22 Mon Sep 17 00:00:00 2001
From: Anisur Rahman
Date: Wed, 7 Feb 2024 12:45:24 +0600
Subject: [PATCH] Update deps

Signed-off-by: Anisur Rahman
---
 Makefile                                 |   4 +-
 go.mod                                   |   2 +-
 go.sum                                   |   4 +-
 pkg/util.go                              |  10 +-
 .../client-go/core/v1/pod_status.go      | 126 ++++++++++++++++++
 vendor/modules.txt                       |   2 +-
 6 files changed, 137 insertions(+), 11 deletions(-)

diff --git a/Makefile b/Makefile
index dee5efa1..60edab5f 100644
--- a/Makefile
+++ b/Makefile
@@ -68,8 +68,8 @@ TAG := $(VERSION)_$(OS)_$(ARCH)
 TAG_PROD := $(TAG)
 TAG_DBG := $(VERSION)-dbg_$(OS)_$(ARCH)
 
-GO_VERSION ?= 1.20
-BUILD_IMAGE ?= appscode/golang-dev:$(GO_VERSION)
+GO_VERSION ?= 1.21
+BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION)
 
 OUTBIN = bin/$(OS)_$(ARCH)/$(BIN)
 ifeq ($(OS),windows)
diff --git a/go.mod b/go.mod
index de7d9951..37e5f548 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
 	k8s.io/apimachinery v0.29.0
 	k8s.io/client-go v0.29.0
 	k8s.io/klog/v2 v2.110.1
-	kmodules.xyz/client-go v0.29.3
+	kmodules.xyz/client-go v0.29.4
 	kmodules.xyz/custom-resources v0.29.0
 	kmodules.xyz/offshoot-api v0.29.0
 	stash.appscode.dev/apimachinery v0.32.1-0.20240206075719-41610d0ce38f
diff --git a/go.sum b/go.sum
index df312722..69b53552 100644
--- a/go.sum
+++ b/go.sum
@@ -522,8 +522,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 kmodules.xyz/apiversion v0.2.0 h1:vAQYqZFm4xu4pbB1cAdHbFEPES6EQkcR4wc06xdTOWk=
 kmodules.xyz/apiversion v0.2.0/go.mod h1:oPX8g8LvlPdPX3Yc5YvCzJHQnw3YF/X4/jdW0b1am80=
-kmodules.xyz/client-go v0.29.3 h1:vkQz4zaqWZ5wk+YQwl+C2LhzTXuJZcCUMvPpwdAzGTo=
-kmodules.xyz/client-go v0.29.3/go.mod h1:xWlS/1zWkx1sIKCAkzULy9570mHZYi2exDECEoP1ek4=
+kmodules.xyz/client-go v0.29.4 h1:WW4vlYtzLc9JXrJjcFuJO4DX/kIZ5ia7QtDyhNDUwfI=
+kmodules.xyz/client-go v0.29.4/go.mod h1:xWlS/1zWkx1sIKCAkzULy9570mHZYi2exDECEoP1ek4=
 kmodules.xyz/custom-resources v0.29.0 h1:RaDM2+wSVXiwIvLqmkTVYpwoH83AC8wruXe2p2rOZNY=
 kmodules.xyz/custom-resources v0.29.0/go.mod h1:MzZyXtxdg1PDxGk3RTTO1Xv3KiVqZnIonSwmxVbagOY=
 kmodules.xyz/objectstore-api v0.29.1-0.20240205052451-a5cf0aa669f1 h1:k66vcGkx9SNka0tfmbeBiEgwj1E2+EKJHxnifOUsroA=
diff --git a/pkg/util.go b/pkg/util.go
index 873c573e..fc3ce076 100644
--- a/pkg/util.go
+++ b/pkg/util.go
@@ -142,7 +142,7 @@ func (opt *options) waitForDBReady(creds []string) error {
 	args = append(args,
 		"--endpoints", opt.etcd.endpoint,
 		"endpoint", "health")
-	return wait.PollImmediate(time.Second*5, time.Second*time.Duration(opt.waitTimeout), func() (bool, error) {
+	return wait.PollUntilContextTimeout(context.TODO(), time.Second*5, time.Second*time.Duration(opt.waitTimeout), true, func(ctx context.Context) (bool, error) {
 		err := sh.Command(EtcdBackupCMD, args).Run()
 		if err != nil {
 			return false, nil
@@ -416,7 +416,7 @@ func (opt *options) execCommandOnPod(pod *corev1.Pod, containerName string, comm
 		return nil, fmt.Errorf("failed to init executor: %v", err)
 	}
 
-	err = exec.Stream(remotecommand.StreamOptions{
+	err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
 		Stdout: &execOut,
 		Stderr: &execErr,
 		Tty:    true,
@@ -430,7 +430,7 @@ func (opt *options) execCommandOnPod(pod *corev1.Pod, containerName string, comm
 }
 
 func waitUntilPodReady(c kubernetes.Interface, meta metav1.ObjectMeta) error {
-	return wait.PollImmediate(kutil.RetryInterval, 5*time.Minute, func() (bool, error) {
+	return wait.PollUntilContextTimeout(context.TODO(), kutil.RetryInterval, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
 		if obj, err := c.CoreV1().Pods(meta.Namespace).Get(context.TODO(), meta.Name, metav1.GetOptions{}); err == nil {
 			return obj.Status.Phase == corev1.PodRunning, nil
 		}
@@ -439,7 +439,7 @@ func waitUntilPodReady(c kubernetes.Interface, meta metav1.ObjectMeta) error {
 }
 
 func (opt *options) waitUntilRestoreComplete(numberOfMembersInEtcdCluster int) error {
-	return wait.PollImmediate(1*time.Second, 2*time.Hour, func() (bool, error) {
+	return wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 2*time.Hour, true, func(ctx context.Context) (bool, error) {
 		restoreSession, err := opt.stashClient.StashV1beta1().RestoreSessions(opt.namespace).Get(context.TODO(), opt.invokerName, metav1.GetOptions{})
 		if err != nil {
 			return false, err
@@ -465,7 +465,7 @@ func (opt *options) waitUntilRestoreComplete(numberOfMembersInEtcdCluster int) e
 func (opt *options) waitUntilScalingCompleted() error {
 	switch opt.workloadKind {
 	case apis.KindStatefulSet:
-		return wait.PollImmediate(kutil.RetryInterval, time.Second*time.Duration(opt.waitTimeout), func() (bool, error) {
+		return wait.PollUntilContextTimeout(context.TODO(), kutil.RetryInterval, time.Second*time.Duration(opt.waitTimeout), true, func(ctx context.Context) (bool, error) {
 			ss, err := opt.kubeClient.AppsV1().StatefulSets(opt.namespace).Get(context.TODO(), opt.workloadName, metav1.GetOptions{})
 			if err != nil {
 				return false, err
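Note: the pkg/util.go hunks above swap the deprecated wait.PollImmediate for wait.PollUntilContextTimeout from k8s.io/apimachinery. A minimal sketch of the call-shape change follows; isHealthy is a hypothetical stand-in for the addon's real probe (shelling out to etcdctl endpoint health), not code from this repository.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// isHealthy is a hypothetical stand-in for the real health probe.
func isHealthy() bool { return true }

func main() {
	// Before: wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { ... })
	// After: pass a context, and set immediate=true to keep PollImmediate's
	// "check once right away" behavior; the condition func now receives a context.
	err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			return isHealthy(), nil
		})
	fmt.Println("poll finished:", err)
}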
diff --git a/vendor/kmodules.xyz/client-go/core/v1/pod_status.go b/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
index eab9e766..cf87120a 100644
--- a/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
+++ b/vendor/kmodules.xyz/client-go/core/v1/pod_status.go
@@ -17,6 +17,8 @@ limitations under the License.
 package v1
 
 import (
+	"fmt"
+
 	core "k8s.io/api/core/v1"
 )
 
@@ -108,3 +110,127 @@ func UpsertPodReadinessGateConditionType(readinessGates []core.PodReadinessGate,
 		ConditionType: conditionType,
 	})
 }
+
+const (
+	// NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
+	// on the node it is (was) running.
+	NodeUnreachablePodReason = "NodeLost"
+)
+
+// GetPodStatus returns pod status like kubectl
+// Adapted from: https://github.com/kubernetes/kubernetes/blob/735804dc812ce647f8c130dced45b5ba4079b76e/pkg/printers/internalversion/printers.go#L825
+func GetPodStatus(pod *core.Pod) string {
+	reason := string(pod.Status.Phase)
+	if pod.Status.Reason != "" {
+		reason = pod.Status.Reason
+	}
+
+	// If the Pod carries {type:PodScheduled, reason:WaitingForGates}, set reason to 'SchedulingGated'.
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == core.PodScheduled && condition.Reason == core.PodReasonSchedulingGated {
+			reason = core.PodReasonSchedulingGated
+		}
+	}
+
+	initContainers := make(map[string]*core.Container)
+	for i := range pod.Spec.InitContainers {
+		initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
+	}
+
+	initializing := false
+	for i := range pod.Status.InitContainerStatuses {
+		container := pod.Status.InitContainerStatuses[i]
+		switch {
+		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
+			continue
+		case isRestartableInitContainer(initContainers[container.Name]) &&
+			container.Started != nil && *container.Started:
+			continue
+		case container.State.Terminated != nil:
+			// initialization is failed
+			if len(container.State.Terminated.Reason) == 0 {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else {
+				reason = "Init:" + container.State.Terminated.Reason
+			}
+			initializing = true
+		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
+			reason = "Init:" + container.State.Waiting.Reason
+			initializing = true
+		default:
+			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
+			initializing = true
+		}
+		break
+	}
+
+	if !initializing || isPodInitializedConditionTrue(&pod.Status) {
+		hasRunning := false
+		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
+			container := pod.Status.ContainerStatuses[i]
+
+			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
+				reason = container.State.Waiting.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
+				reason = container.State.Terminated.Reason
+			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else if container.Ready && container.State.Running != nil {
+				hasRunning = true
+			}
+		}
+
+		// change pod status back to "Running" if there is at least one container still reporting as "Running" status
+		if reason == "Completed" && hasRunning {
+			if hasPodReadyCondition(pod.Status.Conditions) {
+				reason = "Running"
+			} else {
+				reason = "NotReady"
+			}
+		}
+	}
+
+	if pod.DeletionTimestamp != nil && pod.Status.Reason == NodeUnreachablePodReason {
+		reason = "Unknown"
+	} else if pod.DeletionTimestamp != nil {
+		reason = "Terminating"
+	}
+
+	return reason
+}
+
+func hasPodReadyCondition(conditions []core.PodCondition) bool {
+	for _, condition := range conditions {
+		if condition.Type == core.PodReady && condition.Status == core.ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
+
+func isRestartableInitContainer(initContainer *core.Container) bool {
+	if initContainer.RestartPolicy == nil {
+		return false
+	}
+
+	return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways
+}
+
+func isPodInitializedConditionTrue(status *core.PodStatus) bool {
+	for _, condition := range status.Conditions {
+		if condition.Type != core.PodInitialized {
+			continue
+		}
+
+		return condition.Status == core.ConditionTrue
+	}
+	return false
+}
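Note: the kmodules.xyz/client-go bump to v0.29.4 pulls in the GetPodStatus helper vendored above, which derives a kubectl-style status string from a pod object. A rough usage sketch; the pod literal below is a made-up example, not taken from this repository.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	core_util "kmodules.xyz/client-go/core/v1"
)

func main() {
	// A made-up pod with one ready, running container.
	pod := &core.Pod{
		Status: core.PodStatus{
			Phase: core.PodRunning,
			Conditions: []core.PodCondition{
				{Type: core.PodReady, Status: core.ConditionTrue},
			},
			ContainerStatuses: []core.ContainerStatus{
				{Ready: true, State: core.ContainerState{Running: &core.ContainerStateRunning{}}},
			},
		},
	}

	// Prints "Running" here; a deleted pod reports "Terminating", a gated pod
	// "SchedulingGated", a failed init container "Init:...", and so on.
	fmt.Println(core_util.GetPodStatus(pod))
}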
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 45900b9d..21023245 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -657,7 +657,7 @@ k8s.io/utils/trace
 # kmodules.xyz/apiversion v0.2.0
 ## explicit; go 1.14
 kmodules.xyz/apiversion
-# kmodules.xyz/client-go v0.29.3
+# kmodules.xyz/client-go v0.29.4
 ## explicit; go 1.21.5
 kmodules.xyz/client-go
 kmodules.xyz/client-go/api/v1
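Note: for the exec.Stream to exec.StreamWithContext change in pkg/util.go, a minimal sketch of the pattern against client-go v0.29 follows. execOnPod and its parameters are hypothetical illustrations of the pattern, not the addon's actual helper.

package main

import (
	"bytes"
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// execOnPod is a hypothetical helper: build a SPDY executor for `kubectl exec`
// style calls and stream with a context instead of the deprecated Stream method.
func execOnPod(config *restclient.Config, kc kubernetes.Interface, pod *corev1.Pod, container string, command ...string) (string, error) {
	req := kc.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(pod.Name).
		Namespace(pod.Namespace).
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Container: container,
			Command:   command,
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return "", fmt.Errorf("failed to init executor: %w", err)
	}

	var out, errOut bytes.Buffer
	// StreamWithContext replaces Stream; the context lets callers cancel a hung exec.
	err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
		Stdout: &out,
		Stderr: &errOut,
	})
	if err != nil {
		return "", fmt.Errorf("could not execute: %w, stderr: %s", err, errOut.String())
	}
	return out.String(), nil
}

func main() {}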