bump up version (#105)
modify golang ci

Signed-off-by: nasusoba <[email protected]>
nasusoba committed May 9, 2024
1 parent 7d52020 commit 0527d86
Showing 13 changed files with 272 additions and 951 deletions.
6 changes: 3 additions & 3 deletions .golangci.yml
@@ -147,14 +147,14 @@ linters-settings:
       - name: unused-parameter
         disabled: true
   staticcheck:
-    go: "1.20"
+    go: "1.21"
   stylecheck:
-    go: "1.20"
+    go: "1.21"
   tagliatelle:
     case:
       rules:
         # Any struct tag type can be used.
         # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`
         json: goCamel
   unused:
-    go: "1.20"
+    go: "1.21"
2 changes: 1 addition & 1 deletion Dockerfile
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.20.7 as build
+FROM --platform=${BUILDPLATFORM} docker.io/library/golang:1.21.9 as build
 ARG TARGETOS TARGETARCH
 ARG package
2 changes: 1 addition & 1 deletion Makefile
@@ -20,7 +20,7 @@ SHELL:=/usr/bin/env bash

 .DEFAULT_GOAL:=help

-GO_VERSION ?= 1.20.7
+GO_VERSION ?= 1.21.9
 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION)

 ARCH ?= $(shell go env GOARCH)
22 changes: 11 additions & 11 deletions controlplane/controllers/kthreescontrolplane_controller.go
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
@@ -48,7 +49,6 @@ import (
 	controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2"
 	k3s "github.com/k3s-io/cluster-api-k3s/pkg/k3s"
 	"github.com/k3s-io/cluster-api-k3s/pkg/kubeconfig"
-	"github.com/k3s-io/cluster-api-k3s/pkg/machinefilters"
 	"github.com/k3s-io/cluster-api-k3s/pkg/secret"
 	"github.com/k3s-io/cluster-api-k3s/pkg/token"
 )
@@ -185,7 +185,7 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	if err != nil {
 		return reconcile.Result{}, err
 	}
-	ownedMachines := allMachines.Filter(machinefilters.OwnedMachines(kcp))
+	ownedMachines := allMachines.Filter(collections.OwnedMachines(kcp))

 	// If no control plane machines remain, remove the finalizer
 	if len(ownedMachines) == 0 {
@@ -219,7 +219,7 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	}

 	// Delete control plane machines in parallel
-	machinesToDelete := ownedMachines.Filter(machinefilters.Not(machinefilters.HasDeletionTimestamp))
+	machinesToDelete := ownedMachines.Filter(collections.Not(collections.HasDeletionTimestamp))
 	var errs []error
 	for i := range machinesToDelete {
 		m := machinesToDelete[i]
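
The loop above gathers per-machine failures into errs instead of aborting on the first error, so one stuck deletion cannot block the rest. A minimal sketch of that aggregation pattern, with a hypothetical deleteMachine standing in for the real client delete call:

    package main

    import (
        "errors"
        "fmt"

        utilerrors "k8s.io/apimachinery/pkg/util/errors"
    )

    // deleteMachine is a hypothetical stand-in for deleting one Machine object.
    func deleteMachine(name string) error {
        if name == "cp-2" {
            return errors.New("machine cp-2: delete failed")
        }
        return nil
    }

    func main() {
        var errs []error
        for _, name := range []string{"cp-1", "cp-2", "cp-3"} {
            if err := deleteMachine(name); err != nil {
                errs = append(errs, err)
            }
        }
        // NewAggregate folds every collected failure into one error (nil if none).
        fmt.Println(utilerrors.NewAggregate(errs))
    }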
@@ -334,12 +334,12 @@ func (r *KThreesControlPlaneReconciler) ClusterToKThreesControlPlane(ctx context
 // updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the
 // resource status subresources up-to-date.
 func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KThreesControlPlane, cluster *clusterv1.Cluster) error {
-	selector := machinefilters.ControlPlaneSelectorForCluster(cluster.Name)
+	selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
 	// Copy label selector to its status counterpart in string format.
 	// This is necessary for CRDs including scale subresources.
 	kcp.Status.Selector = selector.String()

-	ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.OwnedMachines(kcp))
+	ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.OwnedMachines(kcp))
 	if err != nil {
 		return fmt.Errorf("failed to get list of owned machines: %w", err)
 	}
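
The selector string published here is what the scale subresource (and tooling built on it, such as kubectl scale) uses to match the control plane's Machines. A short sketch, assuming only the upstream collections API shown in this diff, of what that published value looks like:

    package main

    import (
        "fmt"

        "sigs.k8s.io/cluster-api/util/collections"
    )

    func main() {
        selector := collections.ControlPlaneSelectorForCluster("my-cluster")
        // The printed form is what lands in kcp.Status.Selector, roughly:
        //   cluster.x-k8s.io/cluster-name=my-cluster,cluster.x-k8s.io/control-plane
        fmt.Println(selector.String())
    }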
@@ -377,7 +377,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	// make sure last resize operation is marked as completed.
 	// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
 	// are actually provisioned (vs reporting completed immediately after the last machine object is created).
-	readyMachines := ownedMachines.Filter(machinefilters.IsReady())
+	readyMachines := ownedMachines.Filter(collections.IsReady())
 	if int32(len(readyMachines)) == replicas {
 		conditions.MarkTrue(kcp, controlplanev1.ResizedCondition)
 	}
@@ -443,20 +443,20 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 		return result, err
 	}

-	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.ControlPlaneMachines(cluster.Name))
+	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.ControlPlaneMachines(cluster.Name))
 	if err != nil {
 		logger.Error(err, "failed to retrieve control plane machines for cluster")
 		return reconcile.Result{}, err
 	}

-	adoptableMachines := controlPlaneMachines.Filter(machinefilters.AdoptableControlPlaneMachines(cluster.Name))
+	adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name))
 	if len(adoptableMachines) > 0 {
 		// We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date
 		// err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster)
 		return reconcile.Result{}, err
 	}

-	ownedMachines := controlPlaneMachines.Filter(machinefilters.OwnedMachines(kcp))
+	ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp))
 	if len(ownedMachines) != len(controlPlaneMachines) {
 		logger.Info("Not all control plane machines are owned by this KThreesControlPlane, refusing to operate in mixed management mode")
 		return reconcile.Result{}, nil
@@ -526,7 +526,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines > desiredReplicas:
 		logger.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines)
 		// The last parameter (i.e. machines needing to be rolled out) should always be empty here.
-		return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, k3s.FilterableMachineCollection{})
+		return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane, collections.Machines{})
 	}

 	// Get the workload cluster client.
@@ -711,7 +711,7 @@ func (r *KThreesControlPlaneReconciler) upgradeControlPlane(
 	cluster *clusterv1.Cluster,
 	kcp *controlplanev1.KThreesControlPlane,
 	controlPlane *k3s.ControlPlane,
-	machinesRequireUpgrade k3s.FilterableMachineCollection,
+	machinesRequireUpgrade collections.Machines,
 ) (ctrl.Result, error) {
 	// TODO: handle reconciliation of etcd members and kubeadm config in case they get out of sync with cluster

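
The pattern across this file: the repo-local machinefilters helpers give way to upstream Cluster API's collections package, and k3s.FilterableMachineCollection becomes collections.Machines. A self-contained sketch of how those upstream filters compose (machine names are made up):

    package main

    import (
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
        "sigs.k8s.io/cluster-api/util/collections"
    )

    func main() {
        now := metav1.NewTime(time.Now())
        healthy := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "cp-1"}}
        deleting := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "cp-2", DeletionTimestamp: &now}}

        machines := collections.FromMachines(healthy, deleting)

        // Mirrors the reconcileDelete call site above: keep only machines
        // that are not already being deleted.
        toDelete := machines.Filter(collections.Not(collections.HasDeletionTimestamp))
        fmt.Println(toDelete.Names()) // [cp-1]
    }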
24 changes: 12 additions & 12 deletions controlplane/controllers/scale.go
@@ -32,24 +32,24 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	ctrl "sigs.k8s.io/controller-runtime"

 	bootstrapv1 "github.com/k3s-io/cluster-api-k3s/bootstrap/api/v1beta2"
 	controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2"
 	k3s "github.com/k3s-io/cluster-api-k3s/pkg/k3s"
-	"github.com/k3s-io/cluster-api-k3s/pkg/machinefilters"
 )

 var ErrPreConditionFailed = errors.New("precondition check failed")

 func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, controlPlane *k3s.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Perform an uncached read of all the owned machines. This check is in place to make sure
 	// that the controller cache is not misbehaving and we end up initializing the cluster more than once.
-	ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.OwnedMachines(kcp))
+	ownedMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), collections.OwnedMachines(kcp))
 	if err != nil {
 		logger.Error(err, "failed to perform an uncached read of control plane machines for cluster")
 		return ctrl.Result{}, err
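
Note the logger swap in this hunk: controlPlane.Logger() is replaced by ctrl.LoggerFrom(ctx), controller-runtime's context-scoped logger, which carries the reconcile request's structured key/values automatically. A minimal sketch of the mechanism (reconcileStep is a made-up helper standing in for the methods above):

    package main

    import (
        "context"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/log/zap"
    )

    func reconcileStep(ctx context.Context) {
        // LoggerFrom pulls the logger stashed in the context, so log lines
        // inherit whatever fields the caller attached to it.
        logger := ctrl.LoggerFrom(ctx)
        logger.Info("scaling control plane", "desired", 3)
    }

    func main() {
        // In a real controller the manager seeds the context; LoggerInto
        // does the same thing explicitly for this sketch.
        ctx := ctrl.LoggerInto(context.Background(), zap.New())
        reconcileStep(ctx)
    }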
@@ -62,7 +62,7 @@ func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 	}

 	bootstrapSpec := controlPlane.InitialControlPlaneConfig()
-	fd := controlPlane.NextFailureDomainForScaleUp()
+	fd := controlPlane.NextFailureDomainForScaleUp(ctx)
 	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
 		logger.Error(err, "Failed to create initial control plane Machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
@@ -74,7 +74,7 @@ func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 }

 func (r *KThreesControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, controlPlane *k3s.ControlPlane) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
 	if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() {
@@ -83,7 +83,7 @@ func (r *KThreesControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,

 	// Create the bootstrap configuration
 	bootstrapSpec := controlPlane.JoinControlPlaneConfig()
-	fd := controlPlane.NextFailureDomainForScaleUp()
+	fd := controlPlane.NextFailureDomainForScaleUp(ctx)
 	if err := r.cloneConfigsAndGenerateMachine(ctx, cluster, kcp, bootstrapSpec, fd); err != nil {
 		logger.Error(err, "Failed to create additional control plane Machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err)
@@ -99,12 +99,12 @@ func (r *KThreesControlPlaneReconciler) scaleDownControlPlane(
 	cluster *clusterv1.Cluster,
 	kcp *controlplanev1.KThreesControlPlane,
 	controlPlane *k3s.ControlPlane,
-	outdatedMachines k3s.FilterableMachineCollection,
+	outdatedMachines collections.Machines,
 ) (ctrl.Result, error) {
-	logger := controlPlane.Logger()
+	logger := ctrl.LoggerFrom(ctx)

 	// Pick the Machine that we should scale down.
-	machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
+	machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines)
 	if err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to select machine for scale down: %w", err)
 	}
@@ -179,7 +179,7 @@ func (r *KThreesControlPlaneReconciler) preflightChecks(_ context.Context, contr

 	// If there are deleting machines, wait for the operation to complete.
 	if controlPlane.HasDeletingMachine() {
-		logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(machinefilters.HasDeletionTimestamp).Names(), ", "))
+		logger.Info("Waiting for machines to be deleted", "Machines", strings.Join(controlPlane.Machines.Filter(collections.HasDeletionTimestamp).Names(), ", "))
 		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
 	}

@@ -237,7 +237,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust
 	return nil
 }

-func selectMachineForScaleDown(controlPlane *k3s.ControlPlane, outdatedMachines k3s.FilterableMachineCollection) (*clusterv1.Machine, error) {
+func selectMachineForScaleDown(ctx context.Context, controlPlane *k3s.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
 	machines := controlPlane.Machines
 	switch {
 	case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
@@ -247,7 +247,7 @@ func selectMachineForScaleDown(controlPlane *k3s.ControlPlane, outdatedMachines
 	case outdatedMachines.Len() > 0:
 		machines = outdatedMachines
 	}
-	return controlPlane.MachineInFailureDomainWithMostMachines(machines)
+	return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines)
 }

 func (r *KThreesControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, bootstrapSpec *bootstrapv1.KThreesConfigSpec, failureDomain *string) error {
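
selectMachineForScaleDown narrows the candidate set (delete-annotated machines first, then outdated ones, then all) and finally asks for a machine in the failure domain that currently holds the most machines, so scale-down keeps the spread even. A toy sketch of that last step, not the repo's implementation:

    package main

    import "fmt"

    // mostCrowdedDomain picks the failure domain holding the most machines;
    // removing from it keeps machines evenly spread. Ties resolve arbitrarily
    // here because Go map iteration order is random.
    func mostCrowdedDomain(machinesPerDomain map[string]int) string {
        best, bestCount := "", -1
        for fd, n := range machinesPerDomain {
            if n > bestCount {
                best, bestCount = fd, n
            }
        }
        return best
    }

    func main() {
        fmt.Println(mostCrowdedDomain(map[string]int{"zone-a": 2, "zone-b": 1}))
        // Output: zone-a
    }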