Read userdata from bootstrap secret.
Gerrit91 committed Nov 14, 2024
1 parent 2260356 commit ebe0325
Showing 4 changed files with 37 additions and 1 deletion.
1 change: 1 addition & 0 deletions api/v1alpha1/metalstackmachine_types.go
@@ -29,6 +29,7 @@ const (
	TagInfraMachineID = "metal-stack.infrastructure.cluster.x-k8s.io/machine-id"

	ProviderMachineCreated clusterv1.ConditionType = "MachineCreated"
	ProviderMachineReady   clusterv1.ConditionType = "MachineReady"
	ProviderMachineHealthy clusterv1.ConditionType = "MachineHealthy"
)

7 changes: 7 additions & 0 deletions config/samples/example.yaml
@@ -34,6 +34,13 @@ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
metadata:
  name: metal-test-controlplane
spec:
  kubeadmConfigSpec:
    format: ignition
    initConfiguration:
      nodeRegistration: {}
    joinConfiguration:
      controlPlane: {}
      nodeRegistration: {}
  machineTemplate:
    nodeDrainTimeout: 10m
    infrastructureRef:
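With format: ignition set, the kubeadm bootstrap provider renders the control plane userdata as Ignition and publishes it in a bootstrap data Secret that the CAPI machine references via Spec.Bootstrap.DataSecretName. A minimal Go sketch of that Secret's shape, with hypothetical name and payload values (the "value" key is what the machine controller reads further below):

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Hypothetical example of the bootstrap data Secret produced by the kubeadm
// bootstrap provider. Its name is what Spec.Bootstrap.DataSecretName points
// at; the "value" key carries the rendered userdata that the machine
// controller forwards as UserData.
var exampleBootstrapSecret = &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "metal-test-controlplane-abc12", // hypothetical generated name
		Namespace: "default",
	},
	Data: map[string][]byte{
		"value":  []byte("{...}"),    // rendered Ignition payload (shortened here)
		"format": []byte("ignition"), // payload format, matching kubeadmConfigSpec.format
	},
}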
2 changes: 2 additions & 0 deletions internal/controller/metalstackcluster_controller.go
@@ -113,6 +113,8 @@ func (r *MetalStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
		statusErr := reconciler.status()
		if statusErr != nil {
			err = errors.Join(err, fmt.Errorf("unable to update status: %w", statusErr))
		} else if !reconciler.infraCluster.Status.Ready {
			err = errors.New("cluster is not yet ready, requeueing")
		}
	}()

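Both reconcilers use the same pattern here: a named error return that the deferred status update can overwrite, so a "not yet ready" state surfaces as a non-nil error and controller-runtime requeues the request with backoff. A self-contained sketch of that pattern with hypothetical names (not part of this commit):

package example

import (
	"context"
	"errors"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime"
)

// exampleReconciler is a hypothetical stand-in for the metal-stack reconcilers.
type exampleReconciler struct{}

func (r *exampleReconciler) reconcile(ctx context.Context) error    { return nil }
func (r *exampleReconciler) updateStatus(ctx context.Context) error { return nil }
func (r *exampleReconciler) ready() bool                            { return false }

// Reconcile uses a named error return value; the deferred status update can
// replace it, so a "not yet ready" error is returned even when reconciliation
// itself succeeded, which makes controller-runtime requeue the request.
func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, err error) {
	defer func() {
		if statusErr := r.updateStatus(ctx); statusErr != nil {
			err = errors.Join(err, fmt.Errorf("unable to update status: %w", statusErr))
		} else if !r.ready() {
			err = errors.New("not yet ready, requeueing")
		}
	}()

	err = r.reconcile(ctx)

	return ctrl.Result{}, err // return err, not nil, so the deferred func can still influence it
}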
28 changes: 27 additions & 1 deletion internal/controller/metalstackmachine_controller.go
@@ -143,6 +143,8 @@ func (r *MetalStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re
		statusErr := reconciler.status()
		if statusErr != nil {
			err = errors.Join(err, fmt.Errorf("unable to update status: %w", statusErr))
		} else if !reconciler.infraMachine.Status.Ready {
			err = errors.New("machine is not yet ready, requeueing")
		}
	}()

@@ -188,6 +190,10 @@ func (r *MetalStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Re
		return ctrl.Result{}, errors.New("waiting until control plane ip was set to cluster spec")
	}

	if machine.Spec.Bootstrap.DataSecretName == nil {
		return ctrl.Result{}, errors.New("waiting until bootstrap data secret was created")
	}

	err = reconciler.reconcile()

	return ctrl.Result{}, err // remember to return err here and not nil because the defer func can influence this
@@ -256,6 +262,17 @@ func (r *machineReconciler) delete() error {
}

func (r *machineReconciler) create() (*models.V1MachineResponse, error) {
	bootstrapSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      *r.clusterMachine.Spec.Bootstrap.DataSecretName,
			Namespace: r.infraMachine.Namespace,
		},
	}
	err := r.client.Get(r.ctx, client.ObjectKeyFromObject(bootstrapSecret), bootstrapSecret)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch bootstrap secret: %w", err)
	}

	var (
		ips []string
		nws = []*models.V1MachineAllocationNetwork{
@@ -292,7 +309,8 @@ func (r *machineReconciler) create() (*models.V1MachineResponse, error) {
		Description: fmt.Sprintf("%s/%s for cluster %s/%s", r.infraMachine.Namespace, r.infraMachine.Name, r.infraCluster.Namespace, r.infraCluster.Name),
		Networks:    nws,
		Ips:         ips,
		// TODO: UserData, SSHPubKeys, ...
		UserData:    string(bootstrapSecret.Data["value"]),
		// TODO: SSHPubKeys, ...
	}), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to allocate machine: %w", err)
@@ -330,9 +348,11 @@ func (r *machineReconciler) status() error {
	case err != nil && !errors.Is(err, errProviderMachineNotFound):
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "InternalError", clusterv1.ConditionSeverityError, "%s", err.Error())
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineHealthy, "NotHealthy", clusterv1.ConditionSeverityWarning, "machine not created")
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineReady, "NotReady", clusterv1.ConditionSeverityWarning, "machine not created")
	case err != nil && errors.Is(err, errProviderMachineNotFound):
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineCreated, "NotCreated", clusterv1.ConditionSeverityError, "%s", err.Error())
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineHealthy, "NotHealthy", clusterv1.ConditionSeverityWarning, "machine not created")
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineReady, "NotReady", clusterv1.ConditionSeverityWarning, "machine not created")
	default:
		if r.infraMachine.Spec.ProviderID == *m.ID {
			conditions.MarkTrue(r.infraMachine, v1alpha1.ProviderMachineCreated)
@@ -357,6 +377,12 @@ func (r *machineReconciler) status() error {
		}
	}

	if m.Events != nil && len(m.Events.Log) > 0 && ptr.Deref(m.Events.Log[0].Event, "") == "Phoned Home" {
		conditions.MarkTrue(r.infraMachine, v1alpha1.ProviderMachineReady)
	} else {
		conditions.MarkFalse(r.infraMachine, v1alpha1.ProviderMachineReady, "NotReady", clusterv1.ConditionSeverityWarning, "machine is not in phoned home state")
	}

	if len(errs) == 0 {
		conditions.MarkTrue(r.infraMachine, v1alpha1.ProviderMachineHealthy)
	} else {
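The new ProviderMachineReady condition keys off the most recent entry in the machine's provisioning event log. A small sketch of that check pulled out into a standalone helper, assuming the metal-go models used in the diff (isPhonedHome is a hypothetical name, not part of the commit):

package example

import (
	"github.com/metal-stack/metal-go/api/models"
	"k8s.io/utils/ptr"
)

// isPhonedHome reports whether the latest entry in the machine's event log is
// "Phoned Home", i.e. the installed operating system has called back to the
// metal-api, which is what the controller treats as "ready".
func isPhonedHome(m *models.V1MachineResponse) bool {
	if m == nil || m.Events == nil || len(m.Events.Log) == 0 {
		return false
	}
	return ptr.Deref(m.Events.Log[0].Event, "") == "Phoned Home"
}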
