diff --git a/.github/workflows/check-update-crd-reference.yaml b/.github/workflows/check-update-crd-reference.yaml new file mode 100644 index 0000000000..b8ba779710 --- /dev/null +++ b/.github/workflows/check-update-crd-reference.yaml @@ -0,0 +1,20 @@ +# Validates the configuration for the CRD reference update script +# in scripts/update-crd-reference + +name: check-update-crd-reference + +on: + push: + paths: + - scripts/update-crd-reference/* + +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate configuration + run: | + make update-crd-reference diff --git a/.github/workflows/generate-review-issues.yaml b/.github/workflows/generate-review-issues.yaml index f0dbb9fb2f..b14084bd6f 100644 --- a/.github/workflows/generate-review-issues.yaml +++ b/.github/workflows/generate-review-issues.yaml @@ -4,7 +4,6 @@ on: # At 0:05 on Sundays - cron: '5 0 * * 0' - jobs: front-matter: runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 48440aabc3..62942c34ec 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,6 @@ update-cluster-app-reference: # Generate the reference documentation for the custom resource # definitions (CRD) used in the Management API. 
update-crd-reference: - scripts/update-crd-reference/update_config.sh scripts/update-crd-reference/main.sh lint: lint-markdown lint-prose validate-front-matter diff --git a/renovate.json5 b/renovate.json5 index af7ff49aed..764c1d68cc 100644 --- a/renovate.json5 +++ b/renovate.json5 @@ -13,5 +13,16 @@ ], versioningTemplate: '{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}', }, + // Detect CRD source versions + { + customType: 'regex', + datasourceTemplate: 'github-tags', + fileMatch: ['^scripts/update-crd-reference/config\\.yaml$'], + matchStrings: [ + 'short_name:\\s*(?<depName>.+?)\\s+commit_reference:\\s*(?<currentValue>\\S+)', + ], + packageNameTemplate: 'giantswarm/{{{depName}}}', + versioningTemplate: 'semver-coerced', + }, ], } diff --git a/scripts/update-crd-reference/main.sh b/scripts/update-crd-reference/main.sh index b39843fa88..64ca1a3808 100755 --- a/scripts/update-crd-reference/main.sh +++ b/scripts/update-crd-reference/main.sh @@ -3,7 +3,7 @@ set -e # renovate: datasource=docker depName=gsoci.azurecr.io/giantswarm/crd-docs-generator versioning=loose -CRD_DOCS_GENERATOR_VERSION=0.11.1 +CRD_DOCS_GENERATOR_VERSION=0.11.2 DESTINATION=src/content/reference/platform-api/crd diff --git a/scripts/update-crd-reference/update_config.sh b/scripts/update-crd-reference/update_config.sh deleted file mode 100755 index 3500ba6965..0000000000 --- a/scripts/update-crd-reference/update_config.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -# Check if jq is installed -if ! command -v jq &>/dev/null; then - echo "jq is required but not installed. Please install jq and try again." - exit 1 -fi - -# Check if yq is installed -if ! command -v yq &>/dev/null; then - echo "yq is required but not installed. Please install yq and try again." 
- exit 1 -fi - -CONFIG="config.yaml" - -# Function to update the commit_reference with the latest release -update_commit_reference() { - local url="$1" - local latest_release - - # Extract the repo name from the URL for API call - repo_name=$(echo "$url" | awk -F '/' '{print $(NF-1)"/"$NF}') - - # Fetch the latest release tag from GitHub API - latest_release=$(curl -s "https://api.github.com/repos/$repo_name/releases/latest" | jq -r '.tag_name') - - # If the API call was successful and we got a tag name - if [[ "$latest_release" != "null" ]]; then - echo "Updating $repo_name to latest release: $latest_release" - # Update the YAML file with the new commit_reference - yq eval -i "(.source_repositories[] | select(.url == \"$url\").commit_reference) = \"$latest_release\"" "$CONFIG" - else - echo "Failed to fetch latest release for $repo_name" - fi -} - -# Loop through each source repository in the YAML file -yq eval '.source_repositories[] | .url' "$CONFIG" | while read -r url; do - update_commit_reference "$url" -done - -echo "Update complete." diff --git a/src/content/tutorials/fleet-management/cluster-management/cluster-autoscaler/index.md b/src/content/tutorials/fleet-management/cluster-management/cluster-autoscaler/index.md new file mode 100644 index 0000000000..03199cc39d --- /dev/null +++ b/src/content/tutorials/fleet-management/cluster-management/cluster-autoscaler/index.md @@ -0,0 +1,118 @@ +--- +linkTitle: Cluster autoscaler +title: Advanced cluster autoscaler configuration +description: Here we describe how you can customize the configuration of the managed cluster autoscaler service in your workload clusters. +weight: 90 +menu: + principal: + parent: tutorials-fleet-management-clusters + identifier: tutorials-fleet-management-clusters-cluster-autoscaler +user_questions: + - Where can I find the ConfigMap to configure cluster-autoscaler? + - What cluster-autoscaler options can I configure? 
+last_review_date: 2024-12-13 +owner: + - https://github.com/orgs/giantswarm/teams/team-phoenix +--- + +In the Giant Swarm platform, your workload clusters come with default autoscaling functionality. Today, it's supported by {{% autoscaling_supported_versions %}}, but our goal is to bring this feature to all supported providers. + +The cluster autoscaler runs in the workload cluster and is responsible for scaling the number of nodes in the cluster. The configuration, though, is managed in the management cluster. The autoscaling controller has a default configuration for the [cluster-autoscaler addon](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler). To configure the `cluster-autoscaler` further, you need to access the platform API. [Learn how to access the platform API]({{< relref "/getting-started/access-to-platform-api" >}}). + +To extend the configuration, you need to override these defaults using a `ConfigMap` with the conventional name `cluster-autoscaler-user-values`. + +## Where is the user values ConfigMap + +The following examples assume the cluster you are trying to configure has an id of `myclustername`. 
+ +You will find the `ConfigMap` named `myclustername-cluster-autoscaler-user-values` in the organization namespace of your cluster: + +```text +$ kubectl -n org-company get cm myclustername-cluster-autoscaler-user-values +NAME DATA AGE +myclustername-cluster-autoscaler-user-values 0 11m +``` + +## How to set configuration options using the user values ConfigMap + +On the platform API, create or edit a ConfigMap named `myclustername-cluster-autoscaler-user-values` +in the workload cluster namespace: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: cluster-autoscaler + name: myclustername-cluster-autoscaler-user-values + namespace: myorg +data: + values: | + configmap: + scaleDownUtilizationThreshold: 0.30 +``` + +## Configuration reference + +The following sections explain some of the configuration options and what their defaults are. They show only the `data` field of the ConfigMap for brevity. + +The most recent source of truth for these values can be found in the [values.yaml](https://github.com/giantswarm/cluster-autoscaler-app/blob/v1.30.3-gs1/helm/cluster-autoscaler-app/values.yaml) file of the `cluster-autoscaler-app`. + +### Scale down utilization threshold + +The `scaleDownUtilizationThreshold` defines the proportion between requested resources and capacity. Once utilization drops below this value, cluster autoscaler will consider a node as removable. + +Our default value is 70%, which means in order to scale down, one of the nodes has to have less utilization (CPU/memory) than this threshold. You can adjust this value to your needs as shown below: + +```yaml +data: + values: | + configmap: + scaleDownUtilizationThreshold: 0.65 +``` + +### Scan interval + +Defines what interval is used to review the state for taking a decision to scale up/down. Our default value is 10 seconds. 
+ +```yaml +data: + values: | + configmap: + scanInterval: "100s" +``` + +### Skip system pods + +By default, the cluster autoscaler will never delete nodes which run pods of the `kube-system` namespace (except `daemonset` pods). This rule can be deactivated by setting the following property to false. + +```yaml +data: + values: | + configmap: + skipNodesWithSystemPods: "false" +``` + +### Skip pods with local storage + +The cluster autoscaler by default deletes nodes with pods using local storage (`hostPath` or `emptyDir`). In case you want to protect these nodes from removal, you can set the following property to true. + +```yaml +data: + values: | + configmap: + skipNodesWithLocalStorage: "true" +``` + +### Balance similar node groups + +The cluster autoscaler by default doesn't differentiate between node groups when scaling. In case you want to enable considering node groups, you need to set the following property to true. + +```yaml +data: + values: | + configmap: + balanceSimilarNodeGroups: "true" +``` + +Read [the Kubernetes autoscaler FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) to learn more about the cluster autoscaler and its configuration options. diff --git a/src/layouts/shortcodes/autoscaling_supported_versions.html b/src/layouts/shortcodes/autoscaling_supported_versions.html new file mode 100644 index 0000000000..65408a38c3 --- /dev/null +++ b/src/layouts/shortcodes/autoscaling_supported_versions.html @@ -0,0 +1 @@ +AWS \ No newline at end of file