diff --git a/examples/demo_cluster/variables.tf b/examples/demo_cluster/variables.tf index 527256d..cf46fa3 100644 --- a/examples/demo_cluster/variables.tf +++ b/examples/demo_cluster/variables.tf @@ -22,21 +22,23 @@ variable "deploy_demo" { } variable "clusters" { - description = "K3s cluster definition" + description = "Cluster definition" type = list(object({ - name = optional(string, "K3s demo cluster") + name = optional(string, "Demo cluster") metro = optional(string, "FR") plan_control_plane = optional(string, "c3.small.x86") plan_node = optional(string, "c3.small.x86") node_count = optional(number, 0) - k3s_ha = optional(bool, false) + ha = optional(bool, false) os = optional(string, "debian_11") - control_plane_hostnames = optional(string, "k3s-cp") - node_hostnames = optional(string, "k3s-node") - custom_k3s_token = optional(string, "") + control_plane_hostnames = optional(string, "cp") + node_hostnames = optional(string, "node") + custom_token = optional(string, "") ip_pool_count = optional(number, 0) - k3s_version = optional(string, "") + kube_version = optional(string, "") metallb_version = optional(string, "") + rancher_version = optional(string, "") + rancher_flavor = optional(string, "") })) default = [{}] } diff --git a/main.tf b/main.tf index d64c0e7..61cdfa9 100644 --- a/main.tf +++ b/main.tf @@ -5,11 +5,11 @@ locals { } ################################################################################ -# K3S Cluster In-line Module +# K8s Cluster In-line Module ################################################################################ -module "k3s_cluster" { - source = "./modules/k3s_cluster" +module "kube_cluster" { + source = "./modules/kube_cluster" for_each = { for cluster in var.clusters : cluster.name => cluster } @@ -18,14 +18,16 @@ module "k3s_cluster" { plan_control_plane = each.value.plan_control_plane plan_node = each.value.plan_node node_count = each.value.node_count - k3s_ha = each.value.k3s_ha + ha = each.value.ha os = 
each.value.os control_plane_hostnames = each.value.control_plane_hostnames node_hostnames = each.value.node_hostnames - custom_k3s_token = each.value.custom_k3s_token - k3s_version = each.value.k3s_version + custom_token = each.value.custom_token + kube_version = each.value.kube_version metallb_version = each.value.metallb_version ip_pool_count = each.value.ip_pool_count + rancher_flavor = each.value.rancher_flavor + rancher_version = each.value.rancher_version metal_project_id = var.metal_project_id deploy_demo = var.deploy_demo global_ip_cidr = local.global_ip_cidr diff --git a/modules/k3s_cluster/templates/user-data.tftpl b/modules/k3s_cluster/templates/user-data.tftpl deleted file mode 100644 index 0fb1ff4..0000000 --- a/modules/k3s_cluster/templates/user-data.tftpl +++ /dev/null @@ -1,388 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -wait_for_k3s_api(){ - # Wait for the node to be available, meaning the K8s API is available - while ! kubectl wait --for condition=ready node $(cat /etc/hostname | tr '[:upper:]' '[:lower:]') --timeout=60s; do sleep 2 ; done -} - -install_bird(){ - # Install bird - apt update && apt install bird jq -y - - # In order to configure bird, the metadata information is required. - # BGP info can take a few seconds to be populated, retry if that's the case - INTERNAL_IP="null" - while [ $${INTERNAL_IP} == "null" ]; do - echo "BGP data still not available..." 
- sleep 5 - METADATA=$(curl -s https://metadata.platformequinix.com/metadata) - INTERNAL_IP=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_ip') - done - PEER_IP_1=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[0]') - PEER_IP_2=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[1]') - ASN=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_as') - ASN_AS=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_as') - MULTIHOP=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].multihop') - GATEWAY=$(echo $${METADATA} | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) | .gateway') - - # Generate the bird configuration based on the metadata values - # https://deploy.equinix.com/developers/guides/configuring-bgp-with-bird/ - cat <<-EOF >/etc/bird/bird.conf - router id $${INTERNAL_IP}; - - protocol direct { - interface "lo"; - } - - protocol kernel { - persist; - scan time 60; - import all; - export all; - } - - protocol device { - scan time 60; - } - - protocol static { - route $${PEER_IP_1}/32 via $${GATEWAY}; - route $${PEER_IP_2}/32 via $${GATEWAY}; - } - - filter metal_bgp { - accept; - } - - protocol bgp neighbor_v4_1 { - export filter metal_bgp; - local as $${ASN}; - multihop; - neighbor $${PEER_IP_1} as $${ASN_AS}; - } - - protocol bgp neighbor_v4_2 { - export filter metal_bgp; - local as $${ASN}; - multihop; - neighbor $${PEER_IP_2} as $${ASN_AS}; - } - EOF - - # Wait for K3s to be up, otherwise the second and third control plane nodes will try to join localhost - wait_for_k3s_api - - # Configure the BGP interface - # https://deploy.equinix.com/developers/guides/configuring-bgp-with-bird/ - if ! 
grep -q 'lo:0' /etc/network/interfaces; then - cat <<-EOF >>/etc/network/interfaces - - auto lo:0 - iface lo:0 inet static - address ${API_IP} - netmask 255.255.255.255 - EOF - ifup lo:0 - fi - - # Enable IP forward for bird - # TODO: Check if this is done automatically with K3s, it doesn't hurt however - echo "net.ipv4.ip_forward=1" | tee /etc/sysctl.d/99-ip-forward.conf - sysctl --load /etc/sysctl.d/99-ip-forward.conf - - # Debian usually starts the service after being installed, but just in case - systemctl enable bird - systemctl restart bird -} - -install_metallb(){ - apt update && apt install -y curl jq - -%{ if metallb_version != "" ~} - export METALLB_VERSION=${metallb_version} -%{ else ~} - export METALLB_VERSION=$(curl --silent "https://api.github.com/repos/metallb/metallb/releases/latest" | jq -r .tag_name) -%{ endif ~} - - # Wait for K3s to be up. It should be up already but just in case. - wait_for_k3s_api - - # Apply the MetalLB manifest - kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$${METALLB_VERSION}/config/manifests/metallb-native.yaml - - # Wait for MetalLB to be up - while ! kubectl wait --for condition=ready -n metallb-system $(kubectl get pods -n metallb-system -l component=controller -o name) --timeout=10s; do sleep 2 ; done - - # In order to configure MetalLB, the metadata information is required. - # BGP info can take a few seconds to be populated, retry if that's the case - INTERNAL_IP="null" - while [ $${INTERNAL_IP} == "null" ]; do - echo "BGP data still not available..." 
- sleep 5 - METADATA=$(curl -s https://metadata.platformequinix.com/metadata) - INTERNAL_IP=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_ip') - done - PEER_IP_1=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[0]') - PEER_IP_2=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[1]') - ASN=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_as') - ASN_AS=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_as') - -%{ if global_ip_cidr != "" ~} - # Configure the IPAddressPool for the Global IP if present - cat <<- EOF | kubectl apply -f - - apiVersion: metallb.io/v1beta1 - kind: IPAddressPool - metadata: - name: anycast-ip - namespace: metallb-system - spec: - addresses: - - ${global_ip_cidr} - autoAssign: false - EOF -%{ endif ~} - -%{ if ip_pool != "" ~} - # Configure the IPAddressPool for the IP pool if present - cat <<- EOF | kubectl apply -f - - apiVersion: metallb.io/v1beta1 - kind: IPAddressPool - metadata: - name: ippool - namespace: metallb-system - spec: - addresses: - - ${ip_pool} - autoAssign: false - EOF -%{ endif ~} - - # Configure the BGPPeer for each peer IP - cat <<- EOF | kubectl apply -f - - apiVersion: metallb.io/v1beta2 - kind: BGPPeer - metadata: - name: equinix-metal-peer-1 - namespace: metallb-system - spec: - peerASN: $${ASN_AS} - myASN: $${ASN} - peerAddress: $${PEER_IP_1} - sourceAddress: $${INTERNAL_IP} - EOF - - cat <<- EOF | kubectl apply -f - - apiVersion: metallb.io/v1beta2 - kind: BGPPeer - metadata: - name: equinix-metal-peer-1 - namespace: metallb-system - spec: - peerASN: $${ASN_AS} - myASN: $${ASN} - peerAddress: $${PEER_IP_2} - sourceAddress: $${INTERNAL_IP} - EOF - - # Enable the BGPAdvertisement, only to be executed in the control-plane nodes - cat <<- EOF | kubectl apply -f - - apiVersion: metallb.io/v1beta1 - kind: BGPAdvertisement - metadata: - name: bgp-peers - namespace: metallb-system - spec: - nodeSelectors: - - matchLabels: - node-role.kubernetes.io/control-plane: "true" - EOF -} - 
-install_k3s(){ - # Curl is needed to download the k3s binary - # Jq is needed to parse the Equinix Metal metadata (json format) - apt update && apt install curl jq -y - - # Download the K3s installer script - curl -L --output k3s_installer.sh https://get.k3s.io && install -m755 k3s_installer.sh /usr/local/bin/ - -%{ if node_type == "control-plane" ~} - # If the node to be installed is the second or third control plane or extra nodes, wait for the API to be up - # Wait for the first control plane node to be up - while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done -%{ endif ~} -%{ if node_type == "node" ~} - # Wait for the first control plane node to be up - while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done -%{ endif ~} - - export INSTALL_K3S_SKIP_START=false - export K3S_TOKEN="${k3s_token}" - export NODE_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == false and .address_family == 4) |.address') - export NODE_EXTERNAL_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) |.address') -%{ if node_type == "all-in-one" ~} -%{ if global_ip_cidr != "" ~} - export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ else ~} -%{ if ip_pool != "" ~} - export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ else ~} - export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ endif ~} -%{ endif ~} -%{ endif ~} -%{ if node_type == "control-plane-master" ~} - export INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644 --tls-san=${API_IP} 
--tls-san=${API_IP}.sslip.io --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ endif ~} -%{ if node_type == "control-plane" ~} - export INSTALL_K3S_EXEC="server --server https://${API_IP}:6443 --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ endif ~} -%{ if node_type == "node" ~} - export INSTALL_K3S_EXEC="agent --server https://${API_IP}:6443 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" -%{ endif ~} -%{ if k3s_version != "" ~} - export INSTALL_K3S_VERSION=${k3s_version} -%{ endif ~} - /usr/local/bin/k3s_installer.sh - - systemctl enable --now k3s -} - -deploy_demo(){ - kubectl annotate svc -n kube-system traefik "metallb.universe.tf/address-pool=anycast-ip" - - # I cannot make split work in Terraform templates - IP=$(echo ${global_ip_cidr} | cut -d/ -f1) - cat <<- EOF | kubectl apply -f - - --- - apiVersion: v1 - kind: Namespace - metadata: - name: hello-kubernetes - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: hello-kubernetes - namespace: hello-kubernetes - labels: - app.kubernetes.io/name: hello-kubernetes - --- - apiVersion: v1 - kind: Service - metadata: - name: hello-kubernetes - namespace: hello-kubernetes - labels: - app.kubernetes.io/name: hello-kubernetes - spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: hello-kubernetes - --- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: hello-kubernetes - namespace: hello-kubernetes - labels: - app.kubernetes.io/name: hello-kubernetes - spec: - replicas: 2 - selector: - matchLabels: - app.kubernetes.io/name: hello-kubernetes - template: - metadata: - labels: - app.kubernetes.io/name: hello-kubernetes - spec: - serviceAccountName: hello-kubernetes - containers: - - name: hello-kubernetes - image: "paulbouwer/hello-kubernetes:1.10" - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 
8080 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - env: - - name: HANDLER_PATH_PREFIX - value: "" - - name: RENDER_PATH_PREFIX - value: "" - - name: KUBERNETES_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: KUBERNETES_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CONTAINER_IMAGE - value: "paulbouwer/hello-kubernetes:1.10" - --- - apiVersion: networking.k8s.io/v1 - kind: Ingress - metadata: - name: hello-kubernetes-ingress - namespace: hello-kubernetes - spec: - rules: - - host: hellok3s.$${IP}.sslip.io - http: - paths: - - path: "/" - pathType: Prefix - backend: - service: - name: hello-kubernetes - port: - name: http - EOF -} - -install_k3s - -%{ if node_type == "control-plane-master" ~} -install_bird -install_metallb -%{ endif ~} -%{ if node_type == "control-plane" ~} -install_bird -install_metallb -%{ endif ~} - -%{ if node_type == "all-in-one" ~} -%{ if global_ip_cidr != "" ~} -INSTALL_METALLB=true -%{ else } -%{ if ip_pool != "" ~} -INSTALL_METALLB=true -%{ else } -INSTALL_METALLB=false -%{ endif ~} -%{ endif ~} -[ $${INSTALL_METALLB} == true ] && install_metallb || true -%{ endif ~} -%{ if deploy_demo != "" ~} -deploy_demo -%{ endif ~} diff --git a/modules/k3s_cluster/README.md b/modules/kube_cluster/README.md similarity index 100% rename from modules/k3s_cluster/README.md rename to modules/kube_cluster/README.md diff --git a/modules/k3s_cluster/main.tf b/modules/kube_cluster/main.tf similarity index 73% rename from modules/k3s_cluster/main.tf rename to modules/kube_cluster/main.tf index b9906fa..b14b617 100644 --- a/modules/k3s_cluster/main.tf +++ b/modules/kube_cluster/main.tf @@ -1,10 +1,11 @@ locals { - k3s_token = coalesce(var.custom_k3s_token, random_string.random_k3s_token.result) - api_vip = var.k3s_ha ? 
equinix_metal_reserved_ip_block.api_vip_addr[0].address : equinix_metal_device.all_in_one[0].network[0].address + token = coalesce(var.custom_token, random_string.random_token.result) + api_vip = var.ha ? equinix_metal_reserved_ip_block.api_vip_addr[0].address : equinix_metal_device.all_in_one[0].network[0].address + ingress_ip = var.ip_pool_count > 0 ? equinix_metal_reserved_ip_block.ingress_addr[0].address : "" ip_pool_cidr = var.ip_pool_count > 0 ? equinix_metal_reserved_ip_block.ip_pool[0].cidr_notation : "" } -resource "random_string" "random_k3s_token" { +resource "random_string" "random_token" { length = 16 special = false } @@ -20,32 +21,44 @@ resource "equinix_metal_device" "control_plane_master" { operating_system = var.os billing_cycle = "hourly" project_id = var.metal_project_id - count = var.k3s_ha ? 1 : 0 + count = var.ha ? 1 : 0 description = var.cluster_name user_data = templatefile("${path.module}/templates/user-data.tftpl", { - k3s_token = local.k3s_token, + token = local.token, API_IP = local.api_vip, + ingress_ip = local.ingress_ip, global_ip_cidr = var.global_ip_cidr, ip_pool = local.ip_pool_cidr, - k3s_version = var.k3s_version, + kube_version = var.kube_version, metallb_version = var.metallb_version, deploy_demo = var.deploy_demo, + rancher_flavor = var.rancher_flavor, + rancher_version = var.rancher_version, node_type = "control-plane-master" }) } resource "equinix_metal_bgp_session" "control_plane_master" { device_id = equinix_metal_device.control_plane_master[0].id address_family = "ipv4" - count = var.k3s_ha ? 1 : 0 + count = var.ha ? 1 : 0 } resource "equinix_metal_reserved_ip_block" "api_vip_addr" { - count = var.k3s_ha ? 1 : 0 + count = var.ha ? 1 : 0 project_id = var.metal_project_id metro = var.metal_metro type = "public_ipv4" quantity = 1 - description = "K3s API IP" + description = "Kubernetes API IP" +} + +resource "equinix_metal_reserved_ip_block" "ingress_addr" { + count = var.ip_pool_count > 0 ? 
1 : 0 + project_id = var.metal_project_id + metro = var.metal_metro + type = "public_ipv4" + quantity = 1 + description = "Ingress IP" } resource "equinix_metal_device" "control_plane_others" { @@ -55,16 +68,19 @@ resource "equinix_metal_device" "control_plane_others" { operating_system = var.os billing_cycle = "hourly" project_id = var.metal_project_id - count = var.k3s_ha ? 2 : 0 + count = var.ha ? 2 : 0 description = var.cluster_name depends_on = [equinix_metal_device.control_plane_master] user_data = templatefile("${path.module}/templates/user-data.tftpl", { - k3s_token = local.k3s_token, + token = local.token, API_IP = local.api_vip, + ingress_ip = local.ingress_ip, global_ip_cidr = "", ip_pool = "", - k3s_version = var.k3s_version, + kube_version = var.kube_version, metallb_version = var.metallb_version, + rancher_flavor = var.rancher_flavor, + rancher_version = var.rancher_version, deploy_demo = false, node_type = "control-plane" }) } @@ -72,13 +88,13 @@ resource "equinix_metal_device" "control_plane_others" { resource "equinix_metal_bgp_session" "control_plane_second" { device_id = equinix_metal_device.control_plane_others[0].id address_family = "ipv4" - count = var.k3s_ha ? 1 : 0 + count = var.ha ? 1 : 0 } resource "equinix_metal_bgp_session" "control_plane_third" { device_id = equinix_metal_device.control_plane_others[1].id address_family = "ipv4" - count = var.k3s_ha ? 1 : 0 + count = var.ha ? 
1 : 0 } ################################################################################ @@ -109,12 +125,15 @@ resource "equinix_metal_device" "nodes" { description = var.cluster_name depends_on = [equinix_metal_device.control_plane_master] user_data = templatefile("${path.module}/templates/user-data.tftpl", { - k3s_token = local.k3s_token, + token = local.token, API_IP = local.api_vip, + ingress_ip = local.ingress_ip, global_ip_cidr = "", ip_pool = "", - k3s_version = var.k3s_version, + kube_version = var.kube_version, metallb_version = var.metallb_version, + rancher_flavor = var.rancher_flavor, + rancher_version = var.rancher_version, deploy_demo = false, node_type = "node" }) } @@ -130,16 +149,19 @@ resource "equinix_metal_device" "all_in_one" { operating_system = var.os billing_cycle = "hourly" project_id = var.metal_project_id - count = var.k3s_ha ? 0 : 1 + count = var.ha ? 0 : 1 description = var.cluster_name user_data = templatefile("${path.module}/templates/user-data.tftpl", { - k3s_token = local.k3s_token, + token = local.token, global_ip_cidr = var.global_ip_cidr, ip_pool = local.ip_pool_cidr, API_IP = "", - k3s_version = var.k3s_version, + ingress_ip = local.ingress_ip, + kube_version = var.kube_version, metallb_version = var.metallb_version, deploy_demo = var.deploy_demo, + rancher_flavor = var.rancher_flavor, + rancher_version = var.rancher_version, node_type = "all-in-one" }) } @@ -147,5 +169,5 @@ resource "equinix_metal_device" "all_in_one" { resource "equinix_metal_bgp_session" "all_in_one" { device_id = equinix_metal_device.all_in_one[0].id address_family = "ipv4" - count = var.k3s_ha ? 0 : 1 + count = var.ha ? 
0 : 1 } diff --git a/modules/k3s_cluster/outputs.tf b/modules/kube_cluster/outputs.tf similarity index 71% rename from modules/k3s_cluster/outputs.tf rename to modules/kube_cluster/outputs.tf index 7e6cb62..8d83951 100644 --- a/modules/k3s_cluster/outputs.tf +++ b/modules/kube_cluster/outputs.tf @@ -1,4 +1,4 @@ -output "k3s_api_ip" { +output "kube_api_ip" { value = try(equinix_metal_reserved_ip_block.api_vip_addr[0].address, equinix_metal_device.all_in_one[0].network[0].address) - description = "K3s API IPs" + description = "K8s API IPs" } diff --git a/modules/kube_cluster/templates/user-data.tftpl b/modules/kube_cluster/templates/user-data.tftpl new file mode 100644 index 0000000..e90b7ce --- /dev/null +++ b/modules/kube_cluster/templates/user-data.tftpl @@ -0,0 +1,655 @@ +#!/usr/bin/env bash +set -euo pipefail + +die(){ + echo $${1} >&2 + exit $${2} +} + +prechecks(){ + # Set OS + source /etc/os-release + case $${ID} in + "debian") + export PKGMANAGER="apt" + ;; + "sles") + export PKGMANAGER="zypper" + ;; + "sle-micro") + export PKGMANAGER="transactional-update" + ;; + *) + die "Unsupported OS $${ID}" 1 + ;; + esac + # Set ARCH + ARCH=$(uname -m) + case $${ARCH} in + "amd64") + export ARCH=amd64 + export SUFFIX= + ;; + "x86_64") + export ARCH=amd64 + export SUFFIX= + ;; + "arm64") + export ARCH=arm64 + export SUFFIX=-$${ARCH} + ;; + "s390x") + export ARCH=s390x + export SUFFIX=-$${ARCH} + ;; + "aarch64") + export ARCH=arm64 + export SUFFIX=-$${ARCH} + ;; + "arm*") + export ARCH=arm + export SUFFIX=-$${ARCH}hf + ;; + *) + die "Unsupported architecture $${ARCH}" 1 + ;; + esac +} + +prereqs(){ + # Required packages + case $${PKGMANAGER} in + "apt") + apt update + apt install -y jq curl + ;; + "zypper") + zypper refresh + zypper install -y jq curl + ;; + esac +} + +wait_for_kube_api(){ + # Wait for the node to be available, meaning the K8s API is available + while ! 
kubectl wait --for condition=ready node $(cat /etc/hostname | tr '[:upper:]' '[:lower:]') --timeout=60s; do sleep 2 ; done +} + +install_eco(){ + # Wait for K3s to be up. It should be up already but just in case. + wait_for_kube_api + + # Download helm as required to install endpoint-copier-operator + command -v helm || curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash + + # Add the SUSE Edge charts and deploy ECO + helm repo add suse-edge https://suse-edge.github.io/charts + helm repo update + helm install --create-namespace -n endpoint-copier-operator endpoint-copier-operator suse-edge/endpoint-copier-operator + + # Configure the MetalLB IP Address pool for the VIP + cat <<-EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: kubernetes-vip-ip-pool + namespace: metallb-system + spec: + addresses: + - ${API_IP}/32 + serviceAllocation: + priority: 100 + namespaces: + - default + EOF + + # Create the kubernetes-vip service that will be updated by e-c-o with the control plane hosts + if [[ $${KUBETYPE} == "k3s" ]]; then + cat <<-EOF | kubectl apply -f - + apiVersion: v1 + kind: Service + metadata: + name: kubernetes-vip + namespace: default + spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: k8s-api + port: 6443 + protocol: TCP + targetPort: 6443 + type: LoadBalancer + EOF + fi + if [[ $${KUBETYPE} == "rke2" ]]; then + cat <<-EOF | kubectl apply -f - + apiVersion: v1 + kind: Service + metadata: + name: kubernetes-vip + namespace: default + spec: + internalTrafficPolicy: Cluster + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - name: k8s-api + port: 6443 + protocol: TCP + targetPort: 6443 + - name: rke2-api + port: 9345 + protocol: TCP + targetPort: 9345 + type: LoadBalancer + EOF + fi +} + +install_metallb(){ +%{ if metallb_version != "" ~} + export METALLB_VERSION=${metallb_version} +%{ else ~} + export 
METALLB_VERSION=$(curl --silent "https://api.github.com/repos/metallb/metallb/releases/latest" | jq -r .tag_name) +%{ endif ~} + + # Wait for K3s to be up. It should be up already but just in case. + wait_for_kube_api + + # Apply the MetalLB manifest + kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$${METALLB_VERSION}/config/manifests/metallb-native.yaml + + # Wait for MetalLB to be up + while ! kubectl wait --for condition=ready -n metallb-system $(kubectl get pods -n metallb-system -l component=controller -o name) --timeout=10s; do sleep 2 ; done + + # In order to configure MetalLB, the metadata information is required. + # BGP info can take a few seconds to be populated, retry if that's the case + INTERNAL_IP="null" + while [ $${INTERNAL_IP} == "null" ]; do + echo "BGP data still not available..." + sleep 5 + METADATA=$(curl -s https://metadata.platformequinix.com/metadata) + INTERNAL_IP=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_ip') + done + PEER_IP_1=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[0]') + PEER_IP_2=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[1]') + ASN=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_as') + ASN_AS=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_as') + +%{ if global_ip_cidr != "" ~} + # Configure the IPAddressPool for the Global IP if present + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: anycast-ip + namespace: metallb-system + spec: + addresses: + - ${global_ip_cidr} + autoAssign: false + EOF +%{ endif ~} + +%{ if ingress_ip != "" ~} + if [ "$${KUBETYPE}" == "k3s" ]; then + # Configure an IPAddressPool for Ingress only + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: ingress + namespace: metallb-system + spec: + addresses: + - ${ingress_ip}/32 + serviceAllocation: + priority: 100 + serviceSelectors: + - matchExpressions: + - {key: 
app.kubernetes.io/name, operator: In, values: [traefik]} + EOF + fi + if [ "$${KUBETYPE}" == "rke2" ]; then + # Configure an IPAddressPool for Ingress only + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: ingress + namespace: metallb-system + spec: + addresses: + - ${ingress_ip}/32 + serviceAllocation: + priority: 100 + serviceSelectors: + - matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [rke2-ingress-nginx]} + EOF + fi +%{ endif ~} + +%{ if ip_pool != "" ~} + # Configure the IPAddressPool for the IP pool if present + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: IPAddressPool + metadata: + name: ippool + namespace: metallb-system + spec: + addresses: + - ${ip_pool} + autoAssign: false + EOF +%{ endif ~} + + # Configure the BGPPeer for each peer IP + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta2 + kind: BGPPeer + metadata: + name: equinix-metal-peer-1 + namespace: metallb-system + spec: + peerASN: $${ASN_AS} + myASN: $${ASN} + peerAddress: $${PEER_IP_1} + sourceAddress: $${INTERNAL_IP} + EOF + + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta2 + kind: BGPPeer + metadata: + name: equinix-metal-peer-1 + namespace: metallb-system + spec: + peerASN: $${ASN_AS} + myASN: $${ASN} + peerAddress: $${PEER_IP_2} + sourceAddress: $${INTERNAL_IP} + EOF + + # Enable the BGPAdvertisement, only to be executed in the control-plane nodes + cat <<- EOF | kubectl apply -f - + apiVersion: metallb.io/v1beta1 + kind: BGPAdvertisement + metadata: + name: bgp-peers + namespace: metallb-system + spec: + nodeSelectors: + - matchLabels: + node-role.kubernetes.io/control-plane: "true" + EOF +} + +install_k3s(){ + # Download the K3s installer script + curl -L --output k3s_installer.sh https://get.k3s.io && install -m755 k3s_installer.sh /usr/local/bin/ + +%{ if node_type == "control-plane" ~} + # If the node to be installed is the second or 
third control plane or extra nodes, wait for the API to be up + # Wait for the first control plane node to be up + while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done +%{ endif ~} +%{ if node_type == "node" ~} + # Wait for the first control plane node to be up + while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done +%{ endif ~} + + export INSTALL_K3S_SKIP_ENABLE=false + export INSTALL_K3S_SKIP_START=false + export K3S_TOKEN="${token}" + export NODE_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == false and .address_family == 4) |.address') + export NODE_EXTERNAL_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) |.address') +%{ if node_type == "all-in-one" ~} +%{ if global_ip_cidr != "" ~} + export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ else ~} +%{ if ip_pool != "" ~} + export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ else ~} + export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ endif ~} +%{ endif ~} +%{ endif ~} +%{ if node_type == "control-plane-master" ~} + export INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644 --tls-san=${API_IP} --tls-san=${API_IP}.sslip.io --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ endif ~} +%{ if node_type == "control-plane" ~} + export INSTALL_K3S_EXEC="server --server https://${API_IP}:6443 --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ endif ~} +%{ if node_type == "node" ~} + export 
INSTALL_K3S_EXEC="agent --server https://${API_IP}:6443 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" +%{ endif ~} +%{ if kube_version != "" ~} + export INSTALL_K3S_VERSION="${kube_version}" +%{ endif ~} + /usr/local/bin/k3s_installer.sh +} + +install_rke2(){ + # Download the RKE2 installer script + curl -L --output rke2_installer.sh https://get.rke2.io && install -m755 rke2_installer.sh /usr/local/bin/ + + # RKE2 configuration is set via config.yaml file + mkdir -p /etc/rancher/rke2/ + +%{ if node_type == "control-plane" ~} + # If the node to be installed is the second or third control plane or extra nodes, wait for the API to be up + # Wait for the first control plane node to be up + while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done +%{ endif ~} +%{ if node_type == "node" ~} + # Wait for the first control plane node to be up + while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done +%{ endif ~} + + export RKE2_TOKEN="${token}" + export NODE_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == false and .address_family == 4) |.address') + export NODE_EXTERNAL_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) |.address') +%{ if node_type == "all-in-one" ~} + export INSTALL_RKE2_TYPE="server" + cat <<- EOF >> /etc/rancher/rke2/config.yaml + token: $${RKE2_TOKEN} + write-kubeconfig-mode: "0644" + node-ip: $${NODE_IP} + node-external-ip: $${NODE_EXTERNAL_IP} + EOF +%{ endif ~} +%{ if node_type == "control-plane-master" ~} + export INSTALL_RKE2_TYPE="server" + cat <<- EOF >> /etc/rancher/rke2/config.yaml + token: $${RKE2_TOKEN} + write-kubeconfig-mode: "0644" + node-ip: $${NODE_IP} + node-external-ip: $${NODE_EXTERNAL_IP} + tls-san: + - "${API_IP}" + - "${API_IP}.sslip.io" + EOF +%{ endif ~} 
+%{ if node_type == "control-plane" ~} + export INSTALL_RKE2_TYPE="server" + cat <<- EOF >> /etc/rancher/rke2/config.yaml + server: https://${API_IP}:9345 + token: $${RKE2_TOKEN} + write-kubeconfig-mode: "0644" + node-ip: $${NODE_IP} + node-external-ip: $${NODE_EXTERNAL_IP} + EOF +%{ endif ~} +%{ if node_type == "node" ~} + export INSTALL_RKE2_TYPE="agent" + cat <<- EOF >> /etc/rancher/rke2/config.yaml + server: https://${API_IP}:9345 + token: $${RKE2_TOKEN} + write-kubeconfig-mode: "0644" + node-ip: $${NODE_IP} + node-external-ip: $${NODE_EXTERNAL_IP} + EOF +%{ endif ~} +%{ if kube_version != "" ~} + export INSTALL_RKE2_VERSION="${kube_version}" +%{ endif ~} + /usr/local/bin/rke2_installer.sh + systemctl enable --now rke2-$${INSTALL_RKE2_TYPE} +} + +# TODO(eminguez): Fix demo +deploy_demo(){ + # Check if the demo is already deployed + if kubectl get deployment -n hello-kubernetes hello-kubernetes -o name > /dev/null 2>&1; then exit 0; fi + #[[ $${KUBETYPE} == "k3s" ]] && kubectl annotate svc -n kube-system traefik "metallb.universe.tf/address-pool=anycast-ip" + + # I cannot make split work in Terraform templates + IP=$(echo ${global_ip_cidr} | cut -d/ -f1) + cat <<- EOF | kubectl apply -f - + --- + apiVersion: v1 + kind: Namespace + metadata: + name: hello-kubernetes + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: hello-kubernetes + namespace: hello-kubernetes + labels: + app.kubernetes.io/name: hello-kubernetes + --- + apiVersion: v1 + kind: Service + metadata: + name: hello-kubernetes + namespace: hello-kubernetes + labels: + app.kubernetes.io/name: hello-kubernetes + spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: hello-kubernetes + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: hello-kubernetes + namespace: hello-kubernetes + labels: + app.kubernetes.io/name: hello-kubernetes + spec: + replicas: 2 + selector: + matchLabels: + 
app.kubernetes.io/name: hello-kubernetes + template: + metadata: + labels: + app.kubernetes.io/name: hello-kubernetes + spec: + serviceAccountName: hello-kubernetes + containers: + - name: hello-kubernetes + image: "paulbouwer/hello-kubernetes:1.10" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + env: + - name: HANDLER_PATH_PREFIX + value: "" + - name: RENDER_PATH_PREFIX + value: "" + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBERNETES_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CONTAINER_IMAGE + value: "paulbouwer/hello-kubernetes:1.10" + --- + apiVersion: networking.k8s.io/v1 + kind: Ingress + metadata: + name: hello-kubernetes-ingress + namespace: hello-kubernetes + spec: + rules: + - host: hellok3s.$${IP}.sslip.io + http: + paths: + - path: "/" + pathType: Prefix + backend: + service: + name: hello-kubernetes + port: + name: http + EOF +} + +install_rancher(){ + # Wait for Kube API to be up. It should be up already but just in case. 
+ wait_for_kube_api + + # Download helm as required to install Rancher + command -v helm || curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash + + # Get latest Cert-manager version + CMVERSION=$(curl -s "https://api.github.com/repos/cert-manager/cert-manager/releases/latest" | jq -r '.tag_name') + + RANCHERFLAVOR=${rancher_flavor} + # https://ranchermanager.docs.rancher.com/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster + case $${RANCHERFLAVOR} in + "latest" | "stable" | "alpha") + helm repo add rancher https://releases.rancher.com/server-charts/$${RANCHERFLAVOR} + ;; + "prime") + helm repo add rancher https://charts.rancher.com/server-charts/prime + ;; + *) + echo "Rancher flavor not detected, using latest" + helm repo add rancher https://releases.rancher.com/server-charts/latest + ;; + esac + + helm repo add jetstack https://charts.jetstack.io + helm repo update + + # Install the cert-manager Helm chart + helm install cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --set installCRDs=true \ + --version $${CMVERSION} + + IP="" + # https://github.com/rancher/rke2/issues/3958 + if [ "$${KUBETYPE}" == "rke2" ]; then + # Wait for the rke2-ingress-nginx-controller DS to be available if using RKE2 + while ! 
kubectl rollout status daemonset -n kube-system rke2-ingress-nginx-controller --timeout=60s; do sleep 2 ; done + IP=$(kubectl get svc -n kube-system rke2-ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + fi + + # Get the IP of the ingress object if provided + if [ "$${KUBETYPE}" == "k3s" ]; then + IP=$(kubectl get svc -n kube-system traefik -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + fi + + if [[ $${IP} == "" ]]; then + # Just use internal IPs + IP=$(hostname -I | awk '{print $1}') + fi + + # Install rancher using sslip.io as hostname and with just a single replica + helm install rancher rancher/rancher \ + --namespace cattle-system \ + --create-namespace \ + --set hostname=rancher.$${IP}.sslip.io \ + --set replicas=1 \ + --set global.cattle.psp.enabled=false %{ if rancher_version != "" ~}--version "${rancher_version}"%{ endif ~} + + while ! kubectl wait --for condition=ready -n cattle-system $(kubectl get pods -n cattle-system -l app=rancher -o name) --timeout=10s; do sleep 2 ; done +} + +setup_kubectl(){ + shopt -s expand_aliases + case $${KUBETYPE} in + "k3s") + alias kubectl="/usr/local/bin/kubectl" + alias k="kubectl" + export KUBECONFIG=/etc/rancher/k3s/k3s.yaml + ;; + "rke2") + alias kubectl="/var/lib/rancher/rke2/bin/kubectl" + alias k="kubectl" + export KUBECONFIG=/etc/rancher/rke2/rke2.yaml + ;; + *) + die "Kubernetes type $${KUBETYPE} not found" 2 + ;; + esac +} + +prechecks +prereqs +setup_kubectl + +if [[ "${kube_version}" =~ .*"k3s".* ]] || [[ "${kube_version}" == "" ]]; then + export KUBETYPE="k3s" + install_k3s +elif [[ "${kube_version}" =~ .*"rke2".* ]]; then + export KUBETYPE="rke2" + install_rke2 +else + die "Kubernetes version ${kube_version} not valid" 2 +fi + +DEPLOY_DEMO=false +INSTALL_METALLB=false +INSTALL_RANCHER=false +%{ if node_type == "control-plane-master" ~} +INSTALL_METALLB=true +%{ if deploy_demo != "" ~} +DEPLOY_DEMO=true +%{ endif ~} +%{ if rancher_flavor != "" ~} +INSTALL_RANCHER=true +%{ endif ~} +%{ 
endif ~} + +%{ if node_type == "all-in-one" ~} +%{ if global_ip_cidr != "" ~} +INSTALL_METALLB=true +%{ endif } +%{ if ip_pool != "" ~} +INSTALL_METALLB=true +%{ endif } +%{ if deploy_demo != "" ~} +DEPLOY_DEMO=true +%{ endif ~} +%{ if rancher_flavor != "" ~} +INSTALL_RANCHER=true +%{ endif ~} +%{ endif ~} + +[ $${INSTALL_METALLB} == true ] && install_metallb || true + +%{ if API_IP != "" ~} +%{ if node_type == "control-plane-master" ~} +install_eco +%{ endif ~} +%{ endif ~} + +[ $${DEPLOY_DEMO} == true ] && deploy_demo || true + +[ $${INSTALL_RANCHER} == true ] && install_rancher || true diff --git a/modules/k3s_cluster/variables.tf b/modules/kube_cluster/variables.tf similarity index 68% rename from modules/k3s_cluster/variables.tf rename to modules/kube_cluster/variables.tf index c3860a4..e63886c 100644 --- a/modules/k3s_cluster/variables.tf +++ b/modules/kube_cluster/variables.tf @@ -17,30 +17,30 @@ variable "deploy_demo" { variable "cluster_name" { type = string description = "Cluster name" - default = "K3s cluster" + default = "Cluster" } variable "plan_control_plane" { type = string - description = "K3s control plane type/size" + description = "Control plane type/size" default = "c3.small.x86" } variable "plan_node" { type = string - description = "K3s node type/size" + description = "Node type/size" default = "c3.small.x86" } variable "node_count" { type = number - description = "Number of K3s nodes" + description = "Number of nodes" default = "0" } -variable "k3s_ha" { +variable "ha" { type = bool - description = "K3s HA (aka 3 control plane nodes)" + description = "HA (aka 3 control plane nodes)" default = false } @@ -62,9 +62,9 @@ variable "node_hostnames" { default = "node" } -variable "custom_k3s_token" { +variable "custom_token" { type = string - description = "K3s token used for nodes to join the cluster (autogenerated otherwise)" + description = "Token used for nodes to join the cluster (autogenerated otherwise)" default = null } @@ -80,9 +80,9 @@ 
variable "global_ip_cidr" { default = null } -variable "k3s_version" { +variable "kube_version" { type = string - description = "K3s version to be installed. Empty for latest" + description = "K3s/RKE2 version to be installed. Empty for latest K3s" default = "" } @@ -91,3 +91,15 @@ variable "metallb_version" { description = "MetalLB version to be installed. Empty for latest" default = "" } + +variable "rancher_version" { + type = string + description = "Rancher version to be installed (vX.Y.Z). Empty for latest" + default = "" +} + +variable "rancher_flavor" { + type = string + description = "Rancher flavor to be installed (prime, latest, stable or alpha). Empty to not install it" + default = "" +} diff --git a/modules/k3s_cluster/versions.tf b/modules/kube_cluster/versions.tf similarity index 100% rename from modules/k3s_cluster/versions.tf rename to modules/kube_cluster/versions.tf diff --git a/outputs.tf b/outputs.tf index 432a199..6cb399a 100644 --- a/outputs.tf +++ b/outputs.tf @@ -8,9 +8,14 @@ output "demo_url" { description = "URL of the demo application to demonstrate a global IP shared across Metros" } -output "k3s_api" { +output "kube_api" { value = { - for cluster in var.clusters : cluster.name => module.k3s_cluster[cluster.name].k3s_api_ip + for cluster in var.clusters : cluster.name => module.kube_cluster[cluster.name].kube_api_ip } - description = "List of Clusters => K3s APIs" + description = "List of Clusters => K8s APIs" } + +output "rancher_url" { + value = try("http://rancher.${equinix_metal_reserved_ip_block.global_ip[0].address}.sslip.io", null) + description = "URL of the Rancher UI (only when a rancher_flavor is set)" +} \ No newline at end of file diff --git a/variables.tf b/variables.tf index 490abdb..bc357ef 100644 --- a/variables.tf +++ b/variables.tf @@ -16,21 +16,23 @@ variable "deploy_demo" { } variable "clusters" { - description = "K3s cluster definition" + description = "Cluster definition" type = list(object({ -
name = optional(string, "K3s demo cluster") + name = optional(string, "Demo cluster") metro = optional(string, "FR") plan_control_plane = optional(string, "c3.small.x86") plan_node = optional(string, "c3.small.x86") node_count = optional(number, 0) - k3s_ha = optional(bool, false) + ha = optional(bool, false) os = optional(string, "debian_11") - control_plane_hostnames = optional(string, "k3s-cp") - node_hostnames = optional(string, "k3s-node") - custom_k3s_token = optional(string, "") + control_plane_hostnames = optional(string, "cp") + node_hostnames = optional(string, "node") + custom_token = optional(string, "") ip_pool_count = optional(number, 0) - k3s_version = optional(string, "") + kube_version = optional(string, "") metallb_version = optional(string, "") + rancher_flavor = optional(string, "") + rancher_version = optional(string, "") })) default = [{}] }