From 41c12dab1734b10df12ef5b668c54ac842196af8 Mon Sep 17 00:00:00 2001 From: Kartik Joshi Date: Mon, 3 Apr 2023 10:54:39 +0530 Subject: [PATCH] install: Update node label prefix Update node label from node-role.kubernetes.io to node.kubernetes.io Fixes: #780 Signed-off-by: Kartik Joshi --- azure/README.md | 2 +- ibmcloud/cluster/label-nodes.sh | 2 +- install/README.md | 2 +- install/yamls/caa-pod.yaml | 2 +- libvirt/install_operator.sh | 4 ++-- peerpodconfig-ctrl/controllers/peerpodconfig_controller.go | 2 +- test/provisioner/provision_azure.go | 4 ++-- test/provisioner/provision_ibmcloud.go | 2 +- volumes/csi-wrapper/README.md | 2 +- webhook/hack/extended-resources/ext-res-ds.yaml | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/azure/README.md b/azure/README.md index a088e9d18..1e30240a9 100644 --- a/azure/README.md +++ b/azure/README.md @@ -320,7 +320,7 @@ az aks get-credentials \ Label the nodes so that CAA can be deployed on it: ```bash -kubectl label nodes --all node-role.kubernetes.io/worker= +kubectl label nodes --all node.kubernetes.io/worker= ``` ## Deploy Cloud API Adaptor diff --git a/ibmcloud/cluster/label-nodes.sh b/ibmcloud/cluster/label-nodes.sh index 8fbdd90cd..cf1b152c0 100755 --- a/ibmcloud/cluster/label-nodes.sh +++ b/ibmcloud/cluster/label-nodes.sh @@ -11,7 +11,7 @@ nodes=$(kubectl --kubeconfig config get nodes -o name) worker= for node in $nodes; do if [ -n "$worker" ]; then - kubectl --kubeconfig config label "$node" node-role.kubernetes.io/worker= + kubectl --kubeconfig config label "$node" node.kubernetes.io/worker= fi worker=true kubectl --kubeconfig config label "$node" "topology.kubernetes.io/region=$region" diff --git a/install/README.md b/install/README.md index eb1d2b497..4984130d4 100644 --- a/install/README.md +++ b/install/README.md @@ -22,7 +22,7 @@ ``` export NODENAME= - kubectl label node $NODENAME node-role.kubernetes.io/worker= + kubectl label node $NODENAME node.kubernetes.io/worker= ``` ## Deploy 
webhook diff --git a/install/yamls/caa-pod.yaml b/install/yamls/caa-pod.yaml index 50f04a883..4fa6983dd 100644 --- a/install/yamls/caa-pod.yaml +++ b/install/yamls/caa-pod.yaml @@ -50,7 +50,7 @@ spec: name: netns hostNetwork: true nodeSelector: - node-role.kubernetes.io/worker: "" + node.kubernetes.io/worker: "" volumes: - name: auth-json secret: diff --git a/libvirt/install_operator.sh b/libvirt/install_operator.sh index 3748c8bf3..bff426b73 100755 --- a/libvirt/install_operator.sh +++ b/libvirt/install_operator.sh @@ -17,11 +17,11 @@ LIBVIRT_NET="${LIBVIRT_NET:-default}" LIBVIRT_POOL="${LIBVIRT_POOL:-default}" SSH_KEY_FILE="${SSH_KEY_FILE:-}" -# Apply the 'node-role.kubernetes.io/worker' label on all worker nodes. +# Apply the 'node.kubernetes.io/worker' label on all worker nodes. # label_workers() { local workers - local label='node-role.kubernetes.io/worker' + local label='node.kubernetes.io/worker' workers="$(kubectl get nodes --no-headers | grep '\' | awk '{ print $1 }')" for nodename in $workers; do diff --git a/peerpodconfig-ctrl/controllers/peerpodconfig_controller.go b/peerpodconfig-ctrl/controllers/peerpodconfig_controller.go index f74265759..a93f2eca2 100644 --- a/peerpodconfig-ctrl/controllers/peerpodconfig_controller.go +++ b/peerpodconfig-ctrl/controllers/peerpodconfig_controller.go @@ -47,7 +47,7 @@ const ( // Name of env var containing the cloud-api-adaptor image name CloudApiAdaptorImageEnvName = "RELATED_IMAGE_CAA" DefaultCloudApiAdaptorImage = "quay.io/confidential-containers/cloud-api-adaptor" - defaultNodeSelectorLabel = "node-role.kubernetes.io/worker" + defaultNodeSelectorLabel = "node.kubernetes.io/worker" ) // PeerPodConfigReconciler reconciles a PeerPodConfig object diff --git a/test/provisioner/provision_azure.go b/test/provisioner/provision_azure.go index 060ab4a40..b10842eaa 100644 --- a/test/provisioner/provision_azure.go +++ b/test/provisioner/provision_azure.go @@ -305,8 +305,8 @@ func (p *AzureCloudProvisioner) CreateCluster(ctx 
context.Context, cfg *envconf. cfg.WithKubeconfigFile(kubeconfigPath) - // Use cli to label nodes until label is changed from "node-role.kubernetes.io/worker" to "node.kubernetes.io/worker" - cmd := exec.Command("kubectl", "label", "nodes", "--all", fmt.Sprintf("%s=%s", "node-role.kubernetes.io/worker", "")) + // TODO: apply this label as part of cluster provisioning instead of shelling out to kubectl + cmd := exec.Command("kubectl", "label", "nodes", "--all", fmt.Sprintf("%s=%s", "node.kubernetes.io/worker", "")) cmd.Env = append(cmd.Env, fmt.Sprintf("KUBECONFIG="+kubeconfigPath)) _, err = cmd.CombinedOutput() diff --git a/test/provisioner/provision_ibmcloud.go b/test/provisioner/provision_ibmcloud.go index a7f98dea2..2dc88d588 100644 --- a/test/provisioner/provision_ibmcloud.go +++ b/test/provisioner/provision_ibmcloud.go @@ -646,7 +646,7 @@ func (p *IBMCloudProvisioner) CreateCluster(ctx context.Context, cfg *envconf.Co }, }, Labels: map[string]string{ - "node-role.kubernetes.io/worker": "", + "node.kubernetes.io/worker": "", }, }, }, diff --git a/volumes/csi-wrapper/README.md b/volumes/csi-wrapper/README.md index b029d78a3..dfa391a90 100644 --- a/volumes/csi-wrapper/README.md +++ b/volumes/csi-wrapper/README.md @@ -65,7 +65,7 @@ node/liudali-csi-amd64-node-1 patched > reboot > ``` > After the worker node status changed to ready, please set the work role again: -> `kubectl label node liudali-csi-amd64-node-1 node-role.kubernetes.io/worker=` +> `kubectl label node liudali-csi-amd64-node-1 node.kubernetes.io/worker=` 4.
Add labels to worker node: ```bash diff --git a/webhook/hack/extended-resources/ext-res-ds.yaml b/webhook/hack/extended-resources/ext-res-ds.yaml index bcca7d3e0..0f128ab1b 100644 --- a/webhook/hack/extended-resources/ext-res-ds.yaml +++ b/webhook/hack/extended-resources/ext-res-ds.yaml @@ -15,7 +15,7 @@ spec: app: ext-res-updater spec: nodeSelector: - node-role.kubernetes.io/worker: "" + node.kubernetes.io/worker: "" serviceAccountName: ext-res-updater containers: - image: curlimages/curl:8.1.1