diff --git a/.devcontainer/ci/Dockerfile b/.devcontainer/ci/Dockerfile new file mode 100644 index 00000000..e6e945b4 --- /dev/null +++ b/.devcontainer/ci/Dockerfile @@ -0,0 +1,2 @@ +# Ref: https://github.com/devcontainers/ci/issues/191 +FROM mcr.microsoft.com/devcontainers/base:alpine diff --git a/.devcontainer/ci/devcontainer.json b/.devcontainer/ci/devcontainer.json new file mode 100644 index 00000000..2064da8c --- /dev/null +++ b/.devcontainer/ci/devcontainer.json @@ -0,0 +1,26 @@ +{ + "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json", + "name": "Flux Cluster Template (CI)", + "build": { + "dockerfile": "./Dockerfile", + "context": "." + }, + "features": { + "./features": {} + }, + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.profiles.linux": { + "bash": { + "path": "/usr/bin/fish" + } + }, + "terminal.integrated.defaultProfile.linux": "fish" + }, + "extensions": [ + "redhat.vscode-yaml" + ] + } + } +} diff --git a/.devcontainer/ci/features/devcontainer-feature.json b/.devcontainer/ci/features/devcontainer-feature.json new file mode 100644 index 00000000..5f771e34 --- /dev/null +++ b/.devcontainer/ci/features/devcontainer-feature.json @@ -0,0 +1,6 @@ +{ + "name": "Flux Cluster Template (Tools)", + "id": "cluster-template", + "version": "1.0.0", + "description": "Install Tools" +} diff --git a/.devcontainer/ci/features/install.sh b/.devcontainer/ci/features/install.sh new file mode 100644 index 00000000..bbb27428 --- /dev/null +++ b/.devcontainer/ci/features/install.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +set -e +set -o noglob + +apk add --no-cache \ + age bash bind-tools ca-certificates curl direnv gettext python3 \ + py3-pip moreutils jq git iputils openssh-client \ + starship fzf fish yq helm + +apk add --no-cache \ + --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \ + kubectl sops + +apk add --no-cache \ + --repository=https://dl-cdn.alpinelinux.org/alpine/edge/testing \ + lsd + +for app in \ + "budimanjojo/talhelper!!?as=talhelper&type=script" \ + "cilium/cilium-cli!!?as=cilium&type=script" \ + "cli/cli!!?as=gh&type=script" \ + "cloudflare/cloudflared!!?as=cloudflared&type=script" \ + "derailed/k9s!!?as=k9s&type=script" \ + "fluxcd/flux2!!?as=flux&type=script" \ + "go-task/task!!?as=task&type=script" \ + "helmfile/helmfile!!?as=helmfile&type=script" \ + "kubecolor/kubecolor!!?as=kubecolor&type=script" \ + "kubernetes-sigs/krew!!?as=krew&type=script" \ + "kubernetes-sigs/kustomize!!?as=kustomize&type=script" \ + "stern/stern!!?as=stern&type=script" \ + "siderolabs/talos!!?as=talosctl&type=script" \ + "yannh/kubeconform!!?as=kubeconform&type=script" +do + echo "=== Installing ${app} ===" + curl -fsSL "https://i.jpillora.com/${app}" | bash +done + +# Create the fish configuration directory +mkdir -p /home/vscode/.config/fish/{completions,conf.d} + +# Setup autocompletions for fish +for tool in cilium flux helm helmfile k9s kubectl kustomize talhelper talosctl; do + $tool completion fish > /home/vscode/.config/fish/completions/$tool.fish +done +gh completion --shell fish > /home/vscode/.config/fish/completions/gh.fish +stern --completion fish > /home/vscode/.config/fish/completions/stern.fish +yq shell-completion fish > /home/vscode/.config/fish/completions/yq.fish + +# Add hooks into fish +tee /home/vscode/.config/fish/conf.d/hooks.fish > /dev/null < /dev/null < /dev/null < /dev/null <\\S+) depName=(?\\S+)( repository=(?\\S+))?\\n.+: (&\\S+\\s)?(?\\S+)" + ], + "datasourceTemplate": 
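The custom manager above keys on `# renovate:` comments placed directly above a version line; the Flux taskfile later in this diff annotates `PROMETHEUS_OPERATOR_VERSION` in exactly this way. A minimal sketch of the annotation shape, plus a rough, simplified grep approximation of the matchString (named capture groups omitted; the file path is hypothetical) for checking a file locally:

```bash
# Hypothetical annotated variable in the shape the custom regex manager matches.
cat <<'EOF' > /tmp/renovate-example.yaml
# renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator
PROMETHEUS_OPERATOR_VERSION: v0.74.0
EOF

# Rough approximation of the matchString; GNU grep with -z lets the pattern span
# the comment line and the version line that follows it.
grep -Pzo '# renovate: datasource=(\S+) depName=(\S+)( repository=(\S+))?\n.+: (\S+)' \
  /tmp/renovate-example.yaml
```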
"{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}" + } + ] +} diff --git a/.github/tests/config-talos.yaml b/.github/tests/config-talos.yaml new file mode 100644 index 00000000..3df4ce6d --- /dev/null +++ b/.github/tests/config-talos.yaml @@ -0,0 +1,44 @@ +--- +skip_tests: true + +boostrap_talos: + schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba" +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: + - name: k8s-controller-0 + address: 10.10.10.100 + controller: true + disk: fake + mac_addr: fake + - name: k8s-worker-0 + address: 10.10.10.101 + controller: false + disk: fake + mac_addr: fake +bootstrap_dns_servers: ["1.1.1.1", "1.0.0.1"] +bootstrap_dntp_servers: ["time.cloudflare.com"] +bootstrap_pod_network: 10.69.0.0/16 +bootstrap_service_network: 10.96.0.0/16 +bootstrap_controller_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: + enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: + enabled: true + domain: fake + token: take + acme: + email: fake@example.com + production: false + tunnel: + account_id: fake + id: fake + secret: fake + ingress_vip: 10.10.10.252 + ingress_vip: 10.10.10.251 + gateway_vip: 10.10.10.253 diff --git a/.github/workflows/devcontainer.yaml b/.github/workflows/devcontainer.yaml new file mode 100644 index 00000000..00d37c31 --- /dev/null +++ b/.github/workflows/devcontainer.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "devcontainer" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".devcontainer/ci/**"] + pull_request: + branches: ["main"] + paths: [".devcontainer/ci/**"] + schedule: + - cron: "0 0 * * 1" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + devcontainer: + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - if: ${{ github.event_name != 'pull_request' }} + name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push + uses: devcontainers/ci@v0.3 + env: + BUILDX_NO_DEFAULT_ATTESTATIONS: true + with: + imageName: ghcr.io/${{ github.repository }}/devcontainer + # cacheFrom: ghcr.io/${{ github.repository }}/devcontainer + imageTag: base,latest + platform: linux/amd64,linux/arm64 + configFile: .devcontainer/ci/devcontainer.json + push: ${{ github.event_name == 'pull_request' && 'never' || 'always' }} diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 00000000..441b1e18 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,107 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "e2e" + +on: + workflow_dispatch: + pull_request: + branches: ["main"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + configure: + name: configure + 
runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + config-files: + - k3s-ipv4 + - k3s-ipv6 + - talos + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + id: setup-homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Python + uses: actions/setup-python@v5 + id: setup-python + with: + python-version: "3.11" # minimum supported version + + - name: Cache homebrew packages + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + id: cache-homebrew-packages + with: + key: homebrew-${{ runner.os }}-${{ steps.setup-homebrew.outputs.gems-hash }}-${{ hashFiles('.taskfiles/Workstation/Brewfile') }} + path: /home/linuxbrew/.linuxbrew + + - name: Cache venv + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + with: + key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('requirements.txt', 'requirements.yaml') }} + path: .venv + + - name: Setup Workflow Tools + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: brew install go-task + + - name: Run Workstation Brew tasks + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: task workstation:brew + + - name: Run Workstation venv tasks + shell: bash + run: task workstation:venv + + - name: Run Workstation direnv tasks + shell: bash + run: task workstation:direnv + + - name: Run Sops Age key task + shell: bash + run: task sops:age-keygen + + - name: Run init tasks + shell: bash + run: | + task init + cp ./.github/tests/config-${{ matrix.config-files }}.yaml ./config.yaml + export BOOTSTRAP_AGE_PUBLIC_KEY=$(sed -n 's/# public key: //gp' age.key) + envsubst < ./config.yaml | sponge ./config.yaml + + - name: Run configure task + shell: bash + run: task configure --yes + + - name: Run Talos tasks + if: ${{ startsWith(matrix.config-files, 'talos') }} + shell: bash + run: | + task talos:bootstrap-gensecret + task talos:bootstrap-genconfig + + - name: Run Ansible tasks + if: ${{ startsWith(matrix.config-files, 'k3s') }} + shell: bash + run: | + task ansible:deps force=false + task ansible:lint + task ansible:list + + - name: Run repo clean and reset tasks + shell: bash + run: | + task repository:clean + task repository:reset --yes diff --git a/.github/workflows/flux-diff.yaml b/.github/workflows/flux-diff.yaml new file mode 100644 index 00000000..c771e167 --- /dev/null +++ b/.github/workflows/flux-diff.yaml @@ -0,0 +1,90 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Diff" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + flux-diff: + name: Flux Diff + runs-on: ubuntu-latest + permissions: + pull-requests: write + strategy: + matrix: + paths: ["kubernetes"] + resources: ["helmrelease", "kustomization"] + max-parallel: 4 + fail-fast: false + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + path: pull + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + ref: 
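The init step above injects a freshly generated Age public key into the test config before `task configure` runs. The same sequence can be dry-run locally, assuming `age`, `envsubst` (gettext) and `sponge` (moreutils) are installed:

```bash
# Generate an Age key (what `task sops:age-keygen` does) and export its public half.
age-keygen --output age.key
export BOOTSTRAP_AGE_PUBLIC_KEY=$(sed -n 's/# public key: //gp' age.key)

# Copy a test config and substitute the placeholder in place, as the workflow does.
cp ./.github/tests/config-talos.yaml ./config.yaml
envsubst < ./config.yaml | sponge ./config.yaml

# The rendered file should now carry the real age1... public key.
grep bootstrap_sops_age_pubkey ./config.yaml
```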
"${{ github.event.repository.default_branch }}" + path: default + + - name: Diff Resources + uses: docker://ghcr.io/allenporter/flux-local:main + with: + args: >- + diff ${{ matrix.resources }} + --unified 6 + --path /github/workspace/pull/${{ matrix.paths }} + --path-orig /github/workspace/default/${{ matrix.paths }} + --strip-attrs "helm.sh/chart,checksum/config,app.kubernetes.io/version,chart" + --limit-bytes 10000 + --all-namespaces + --sources "home-kubernetes" + --output-file diff.patch + + - name: Generate Diff + id: diff + run: | + cat diff.patch + echo "diff<> $GITHUB_OUTPUT + cat diff.patch >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - if: ${{ steps.diff.outputs.diff != '' }} + name: Add comment + uses: mshick/add-pr-comment@v2 + with: + repo-token: "${{ steps.app-token.outputs.token }}" + message-id: "${{ github.event.pull_request.number }}/${{ matrix.paths }}/${{ matrix.resources }}" + message-failure: Diff was not successful + message: | + ```diff + ${{ steps.diff.outputs.diff }} + ``` + + # Summarize matrix https://github.community/t/status-check-for-a-matrix-jobs/127354/7 + flux-diff-success: + if: ${{ always() }} + needs: ["flux-diff"] + name: Flux Diff Successful + runs-on: ubuntu-latest + steps: + - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + name: Check matrix status + run: exit 1 diff --git a/.github/workflows/kubeconform.yaml b/.github/workflows/kubeconform.yaml new file mode 100644 index 00000000..58a63cc1 --- /dev/null +++ b/.github/workflows/kubeconform.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Kubeconform" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +env: + KUBERNETES_DIR: ./kubernetes + +jobs: + kubeconform: + name: Kubeconform + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + run: brew install fluxcd/tap/flux kubeconform kustomize + + - name: Run kubeconform + shell: bash + run: bash ./scripts/kubeconform.sh ${{ env.KUBERNETES_DIR }} diff --git a/.github/workflows/label-sync.yaml b/.github/workflows/label-sync.yaml new file mode 100644 index 00000000..90804e0a --- /dev/null +++ b/.github/workflows/label-sync.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Label Sync" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/labels.yaml"] + +jobs: + label-sync: + name: Label Sync + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Sync Labels + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.yaml + delete-other-labels: true diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100644 index 00000000..d658c1d9 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Labeler" + +on: + workflow_dispatch: + pull_request_target: + branches: ["main"] + +jobs: + labeler: + name: Labeler + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Labeler + uses: actions/labeler@v5 + with: + configuration-path: .github/labeler.yaml diff --git a/.github/workflows/lychee.yaml b/.github/workflows/lychee.yaml new file mode 100644 index 00000000..10a60eaa --- 
/dev/null +++ b/.github/workflows/lychee.yaml @@ -0,0 +1,66 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Lychee" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/workflows/lychee.yaml"] + schedule: + - cron: "0 0 * * *" + +env: + LYCHEE_OUTPUT: lychee/out.md + WORKFLOW_ISSUE_TITLE: "Link Checker Dashboard 🔗" + +jobs: + lychee: + name: Lychee + runs-on: ubuntu-latest + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + + - name: Scan for broken links + uses: lycheeverse/lychee-action@v1 + id: lychee + with: + token: "${{ steps.app-token.outputs.token }}" + args: --verbose --no-progress --exclude-mail './**/*.md' + format: markdown + output: "${{ env.LYCHEE_OUTPUT }}" + debug: true + + - name: Find Link Checker Issue + id: find-issue + shell: bash + env: + GH_TOKEN: "${{ steps.app-token.outputs.token }}" + run: | + issue_number=$( \ + gh issue list \ + --search "in:title ${{ env.WORKFLOW_ISSUE_TITLE }}" \ + --state open \ + --json number \ + | jq --raw-output '.[0].number' \ + ) + echo "issue-number=${issue_number}" >> $GITHUB_OUTPUT + echo "${issue_number}" + + - name: Create or Update Issue + uses: peter-evans/create-issue-from-file@v5 + with: + token: "${{ steps.app-token.outputs.token }}" + title: "${{ env.WORKFLOW_ISSUE_TITLE }}" + issue-number: "${{ steps.find-issue.outputs.issue-number || '' }}" + content-filepath: "${{ env.LYCHEE_OUTPUT }}" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..fb943f8f --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 1 * *" + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Release + shell: bash + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + run: | + # Retrieve previous release tag + previous_tag="$(gh release list --limit 1 | awk '{ print $1 }')" + previous_major="${previous_tag%%\.*}" + previous_minor="${previous_tag#*.}" + previous_minor="${previous_minor%.*}" + previous_patch="${previous_tag##*.}" + # Determine next release tag + next_major_minor="$(date +'%Y').$(date +'%-m')" + if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then + echo "Month release already exists for year, incrementing patch number by 1" + next_patch="$((previous_patch + 1))" + else + echo "Month release does not exist for year, setting patch number to 0" + next_patch="0" + fi + # Create release + release_tag="${next_major_minor}.${next_patch}" + gh release create "${release_tag}" \ + --repo="${GITHUB_REPOSITORY}" \ + --title="${release_tag}" \ + --generate-notes diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..6ba7ebdb --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# Trash +.DS_Store +Thumbs.db +# k8s +kubeconfig +.decrypted~*.yaml +.config.env +*.agekey +*.pub +*.key +# Private +.private +.bin +# Ansible +.venv* +# Taskfile +.task +# Brew +Brewfile.lock.json +# intellij +.idea +# wiki +wiki +# Bootstrap +/config.yaml +# Direnv +.direnv diff --git a/.lycheeignore b/.lycheeignore 
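The release step above derives a calendar version of the form `YYYY.M.patch` from the previous tag. A worked example of the same string handling with a hypothetical previous tag, to make the bump rule explicit:

```bash
previous_tag="2024.5.1"                 # hypothetical: second release cut in May 2024
previous_major="${previous_tag%%\.*}"   # -> 2024
previous_minor="${previous_tag#*.}"
previous_minor="${previous_minor%.*}"   # -> 5
previous_patch="${previous_tag##*.}"    # -> 1

next_major_minor="$(date +'%Y').$(date +'%-m')"
if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then
  next_patch="$((previous_patch + 1))"  # still May 2024 -> 2024.5.2
else
  next_patch="0"                        # a new month    -> e.g. 2024.6.0
fi
echo "${next_major_minor}.${next_patch}"
```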
new file mode 100644 index 00000000..8cbc880a --- /dev/null +++ b/.lycheeignore @@ -0,0 +1,2 @@ +https://dash.cloudflare.com/profile/api-tokens +https://www.mend.io/free-developer-tools/renovate/ diff --git a/.sops.yaml b/.sops.yaml new file mode 100644 index 00000000..ef98b50f --- /dev/null +++ b/.sops.yaml @@ -0,0 +1,12 @@ +--- +creation_rules: + - # IMPORTANT: This rule MUST be above the others + path_regex: talos/.*\.sops\.ya?ml + key_groups: + - age: + - "age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m" + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" + key_groups: + - age: + - "age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m" diff --git a/.taskfiles/ExternalSecrets/Taskfile.yaml b/.taskfiles/ExternalSecrets/Taskfile.yaml new file mode 100644 index 00000000..c5207685 --- /dev/null +++ b/.taskfiles/ExternalSecrets/Taskfile.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +tasks: + + sync: + desc: Sync an ExternalSecret for a cluster + summary: | + Args: + ns: Namespace the externalsecret is in (default: default) + secret: Secret to sync (required) + cmd: kubectl -n {{.ns}} annotate externalsecret {{.secret}} force-sync=$(date +%s) --overwrite + env: + KUBECONFIG: "{{.KUBERNETES_DIR}}/kubeconfig" + requires: + vars: ["secret"] + vars: + ns: '{{.ns | default "default"}}' + preconditions: + - kubectl -n {{.ns}} get externalsecret {{.secret}} + + sync-all: + desc: Sync all ExternalSecrets for a cluster + cmds: + - for: { var: secrets, split: '' } + task: sync + vars: + ns: '{{$a := split "|" .ITEM}}{{$a._0}}' + secret: '{{$a := split "|" .ITEM}}{{$a._1}}' + env: + KUBECONFIG: "{{.KUBERNETES_DIR}}/kubeconfig" + vars: + secrets: + sh: kubectl get externalsecret --all-namespaces --no-headers -A | awk '{print $1 "|" $2}' diff --git a/.taskfiles/Flux/Taskfile.yaml b/.taskfiles/Flux/Taskfile.yaml new file mode 100644 index 00000000..2fe84d3e --- /dev/null +++ b/.taskfiles/Flux/Taskfile.yaml @@ -0,0 +1,72 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + # renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator + PROMETHEUS_OPERATOR_VERSION: v0.74.0 + CLUSTER_SECRET_SOPS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml" + CLUSTER_SETTINGS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml" + GITHUB_DEPLOY_KEY_FILE: "{{.KUBERNETES_DIR}}/bootstrap/flux/github-deploy-key.sops.yaml" + +tasks: + + bootstrap: + desc: Bootstrap Flux into a Kubernetes cluster + cmds: + # Install essential Prometheus Operator CRDs + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename 
https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + # Install Flux + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/bootstrap/flux + # Set up secrets + - cat {{.AGE_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin + - sops --decrypt {{.CLUSTER_SECRET_SOPS_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename {{.CLUSTER_SETTINGS_FILE}} + # Install Flux Kustomization resources + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/flux/config + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + + apply: + desc: Apply a Flux Kustomization resource for a cluster + summary: | + Args: + path: Path under apps containing the Flux Kustomization resource (ks.yaml) (required) + ns: Namespace the Flux Kustomization exists in (default: flux-system) + cmd: | + flux --kubeconfig {{.KUBECONFIG_FILE}} build ks $(basename {{.path}}) \ + --namespace {{.ns}} \ + --kustomization-file {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml \ + --path {{.KUBERNETES_DIR}}/apps/{{.path}} \ + {{- if contains "not found" .ks }}--dry-run \{{ end }} + | \ + kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side \ + --field-manager=kustomize-controller -f - + requires: + vars: ["path"] + vars: + ns: '{{.ns | default "flux-system"}}' + ks: + sh: flux --kubeconfig {{.KUBECONFIG_FILE}} --namespace {{.ns}} get kustomizations $(basename {{.path}}) 2>&1 + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Flux Kustomization for app {{.path}}", sh: "test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml" } + + reconcile: + desc: Force update Flux to pull in changes from your Git repository + cmd: flux --kubeconfig {{.KUBECONFIG_FILE}} reconcile --namespace flux-system kustomization cluster --with-source + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + + github-deploy-key: + cmds: + - kubectl create namespace flux-system --dry-run=client -o yaml | kubectl --kubeconfig {{.KUBECONFIG_FILE}} apply --filename - + - sops --decrypt {{.GITHUB_DEPLOY_KEY_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + - { msg: "Missing Github deploy key file", sh: "test -f {{.GITHUB_DEPLOY_KEY_FILE}}" } diff --git a/.taskfiles/Kubernetes/Taskfile.yaml b/.taskfiles/Kubernetes/Taskfile.yaml new file mode 100644 index 00000000..e4f52e0c --- /dev/null +++ b/.taskfiles/Kubernetes/Taskfile.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + KUBECONFORM_SCRIPT: "{{.SCRIPTS_DIR}}/kubeconform.sh" + +tasks: + + resources: + desc: Gather common resources in your cluster, useful when asking for support + cmds: + - for: { var: resource } + cmd: kubectl get {{.ITEM}} {{.CLI_ARGS | default "-A"}} + vars: + resource: >- + nodes + gitrepositories + kustomizations + helmrepositories + helmreleases + certificates + certificaterequests + ingresses + pods + 
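The ExternalSecrets `sync-all` task above feeds `namespace|name` pairs into the single-secret `sync` task. A manual shell equivalent of that loop (a sketch; the task itself adds preconditions and uses the repository kubeconfig):

```bash
kubectl get externalsecret --all-namespaces --no-headers \
  | awk '{print $1 "|" $2}' \
  | while IFS='|' read -r ns secret; do
      # A fresh force-sync timestamp makes external-secrets refresh the secret.
      kubectl -n "${ns}" annotate externalsecret "${secret}" \
        force-sync="$(date +%s)" --overwrite
    done
```

The same effect for a single secret is `task secrets:sync ns=<namespace> secret=<name>`, via the `secrets` include in the root Taskfile.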
+ kubeconform: + desc: Validate Kubernetes manifests with kubeconform + cmd: bash {{.KUBECONFORM_SCRIPT}} {{.KUBERNETES_DIR}} + preconditions: + - { msg: "Missing kubeconform script", sh: "test -f {{.KUBECONFORM_SCRIPT}}" } + + .reset: + internal: true + cmd: rm -rf {{.KUBERNETES_DIR}} diff --git a/.taskfiles/Repository/Taskfile.yaml b/.taskfiles/Repository/Taskfile.yaml new file mode 100644 index 00000000..9e6bae36 --- /dev/null +++ b/.taskfiles/Repository/Taskfile.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +tasks: + + clean: + desc: Clean files and directories no longer needed after cluster bootstrap + cmds: + - mkdir -p {{.PRIVATE_DIR}} + # Clean up CI + - rm -rf {{.ROOT_DIR}}/.github/tests + - rm -rf {{.ROOT_DIR}}/.github/workflows/e2e.yaml + # Clean up devcontainer + - rm -rf {{.ROOT_DIR}}/.devcontainer/ci + - rm -rf {{.ROOT_DIR}}/.github/workflows/devcontainer.yaml + # Move bootstrap directory to gitignored directory + - mv {{.BOOTSTRAP_DIR}} {{.PRIVATE_DIR}}/bootstrap-{{now | date "150405"}} + - mv {{.MAKEJINJA_CONFIG_FILE}} {{.PRIVATE_DIR}}/makejinja-{{now | date "150405"}}.toml + # Update renovate.json5 + - sed -i {{if eq OS "darwin"}}''{{end}} 's/(..\.j2)\?//g' {{.ROOT_DIR}}/.github/renovate.json5 + preconditions: + - msg: Missing bootstrap directory + sh: test -d {{.BOOTSTRAP_DIR}} + - msg: Missing Renovate config file + sh: test -f {{.ROOT_DIR}}/.github/renovate.json5 + + reset: + desc: Reset templated configuration files + prompt: Reset templated configuration files... continue? + cmds: + - task: :kubernetes:.reset + - task: :sops:.reset + - task: :talos:.reset + + force-reset: + desc: Reset repo back to HEAD + prompt: Reset repo back to HEAD... continue? + cmds: + - task: reset + - git reset --hard HEAD + - git clean -f -d + - git pull origin main diff --git a/.taskfiles/Sops/Taskfile.yaml b/.taskfiles/Sops/Taskfile.yaml new file mode 100644 index 00000000..7880a005 --- /dev/null +++ b/.taskfiles/Sops/Taskfile.yaml @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +tasks: + + age-keygen: + desc: Initialize Age Key for Sops + cmd: age-keygen --output {{.AGE_FILE}} + status: ["test -f {{.AGE_FILE}}"] + + encrypt: + desc: Encrypt all Kubernetes SOPS secrets + cmds: + - for: { var: file } + task: .encrypt-file + vars: + file: "{{.ITEM}}" + vars: + file: + sh: find "{{.KUBERNETES_DIR}}" -type f -name "*.sops.*" -exec grep -L "ENC\[AES256_GCM" {} \; + + .encrypt-file: + internal: true + cmd: sops --encrypt --in-place {{.file}} + requires: + vars: ["file"] + preconditions: + - msg: Missing Sops config file + sh: test -f {{.SOPS_CONFIG_FILE}} + - msg: Missing Sops Age key file + sh: test -f {{.AGE_FILE}} + + .reset: + internal: true + cmd: rm -rf {{.SOPS_CONFIG_FILE}} diff --git a/.taskfiles/Talos/Taskfile.yaml b/.taskfiles/Talos/Taskfile.yaml new file mode 100644 index 00000000..d99dbc0a --- /dev/null +++ b/.taskfiles/Talos/Taskfile.yaml @@ -0,0 +1,84 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + TALHELPER_CLUSTER_DIR: "{{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig" + TALHELPER_SECRET_FILE: "{{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml" + TALHELPER_CONFIG_FILE: "{{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml" + HELMFILE_FILE: "{{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml" + +env: + TALOSCONFIG: "{{.TALHELPER_CLUSTER_DIR}}/talosconfig" + +tasks: + + bootstrap: + desc: Bootstrap the Talos cluster 
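`task sops:encrypt` above finds `*.sops.*` files that do not yet contain SOPS ciphertext and encrypts them in place against the rules in `.sops.yaml`. The same thing as a one-off loop, assuming `SOPS_AGE_KEY_FILE` points at `age.key` as the root Taskfile sets it:

```bash
export SOPS_AGE_KEY_FILE=./age.key

# grep -L lists files that do NOT yet contain the ENC[AES256_GCM marker.
find ./kubernetes -type f -name "*.sops.*" -exec grep -L "ENC\[AES256_GCM" {} \; \
  | while read -r file; do
      sops --encrypt --in-place "${file}"
    done

# Spot-check: in kubernetes manifests only data/stringData values become ciphertext,
# e.g. in the cluster secrets file referenced by the Flux bootstrap task.
grep -m1 "ENC\[AES256_GCM" ./kubernetes/flux/vars/cluster-secrets.sops.yaml
```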
+ cmds: + - | + if [ ! -f "{{.TALHELPER_SECRET_FILE}}" ]; then + talhelper gensecret > {{.TALHELPER_SECRET_FILE}} + sops --encrypt --in-place {{.TALHELPER_SECRET_FILE}} + fi + - talhelper genconfig --config-file {{.TALHELPER_CONFIG_FILE}} --secret-file {{.TALHELPER_SECRET_FILE}} --out-dir {{.TALHELPER_CLUSTER_DIR}} + - talhelper gencommand apply --config-file {{.TALHELPER_CONFIG_FILE}} --out-dir {{.TALHELPER_CLUSTER_DIR}} --extra-flags="--insecure" | bash + - until talhelper gencommand bootstrap --config-file {{.TALHELPER_CONFIG_FILE}} --out-dir {{.TALHELPER_CLUSTER_DIR}} | bash; do sleep 10; done + - task: fetch-kubeconfig + - task: install-helm-apps + - talosctl health --server=false + preconditions: + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} + - msg: Missing Sops config file + sh: test -f {{.SOPS_CONFIG_FILE}} + - msg: Missing Sops Age key file + sh: test -f {{.AGE_FILE}} + + fetch-kubeconfig: + desc: Fetch kubeconfig + cmd: until talhelper gencommand kubeconfig --config-file {{.TALHELPER_CONFIG_FILE}} --out-dir {{.TALHELPER_CLUSTER_DIR}} --extra-flags="{{.ROOT_DIR}} --force" | bash; do sleep 10; done + preconditions: + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} + + install-helm-apps: + desc: Bootstrap core apps needed for Talos + cmds: + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done + - helmfile --file {{.HELMFILE_FILE}} apply --skip-diff-on-install --suppress-diff + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done + env: + KUBECONFIG: "{{.KUBERNETES_DIR}}/kubeconfig" + preconditions: + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} + - msg: Missing helmfile + sh: test -f {{.HELMFILE_FILE}} + + upgrade-talos: + desc: Upgrade talos on a node + cmd: talosctl --nodes {{.node}} upgrade --image {{.image}} --preserve=true --reboot-mode=default + requires: + vars: ["node", "image"] + preconditions: + - msg: Node not found + sh: talosctl --nodes {{.node}} get machineconfig + + upgrade-k8s: + desc: Upgrade k8s on a node + cmd: talosctl --nodes {{.node}} upgrade-k8s --to {{.to}} + requires: + vars: ["node", "to"] + preconditions: + - msg: Node not found + sh: talosctl --nodes {{.node}} get machineconfig + + nuke: + desc: Resets nodes back to maintenance mode + prompt: This will destroy your cluster and reset the nodes back to maintenance mode... continue? + cmd: talhelper gencommand reset --config-file {{.TALHELPER_CONFIG_FILE}} --out-dir {{.TALHELPER_CLUSTER_DIR}} --extra-flags="--reboot {{- if eq .CLI_FORCE false }} --system-labels-to-wipe STATE --system-labels-to-wipe EPHEMERAL{{ end }} --graceful=false --wait=false" | bash + + .reset: + internal: true + cmd: rm -rf {{.TALHELPER_CLUSTER_DIR}} {{.TALHELPER_SECRET_FILE}} {{.TALHELPER_CONFIG_FILE}} diff --git a/.taskfiles/VolSync/Taskfile.yaml b/.taskfiles/VolSync/Taskfile.yaml new file mode 100644 index 00000000..52031822 --- /dev/null +++ b/.taskfiles/VolSync/Taskfile.yaml @@ -0,0 +1,214 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below. +# 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex) +# 2. ReplicationSource and ReplicationDestination are a Restic repository +# 3. 
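The `upgrade-talos` and `upgrade-k8s` tasks above take their targets via the `node`, `image` and `to` variables. Example invocations using the controller name from the test inventory; the installer image and versions are placeholders (an Image Factory installer reference for your schematic would normally go here):

```bash
# Upgrade Talos on one node (schematic id and version are placeholders).
task talos:upgrade-talos \
  node=k8s-controller-0 \
  image=factory.talos.dev/installer/<schematic-id>:v1.7.4

# Upgrade Kubernetes through that node (version is a placeholder).
task talos:upgrade-k8s node=k8s-controller-0 to=1.30.1
```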
Applications are deployed as either a Kubernetes Deployment or StatefulSet +# 4. Each application only has one PVC that is being replicated + +x-env: &env + app: "{{.app}}" + claim: "{{.claim}}" + controller: "{{.controller}}" + job: "{{.job}}" + ns: "{{.ns}}" + pgid: "{{.pgid}}" + previous: "{{.previous}}" + puid: "{{.puid}}" + +vars: + VOLSYNC_SCRIPTS_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/scripts" + VOLSYNC_TEMPLATES_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/templates" + +tasks: + + state-*: + desc: Suspend or Resume Volsync + summary: | + Args: + state: resume or suspend (required) + cmds: + - flux {{.state}} kustomization volsync + - flux -n {{.ns}} {{.state}} helmrelease volsync + - kubectl -n {{.ns}} scale deployment volsync --replicas {{if eq "suspend" .state}}0{{else}}1{{end}} + env: *env + vars: + ns: '{{.ns | default "volsync-system"}}' + state: '{{index .MATCH 0}}' + + list: + desc: List snapshots for an application + summary: | + Args: + ns: Namespace the PVC is in (default: default) + app: Application to list snapshots for (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml) | kubectl apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} + - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl -n {{.ns}} logs job/{{.job}} --container main + - kubectl -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-list-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml + silent: true + + unlock: + desc: Unlock a Restic repository for an application + summary: | + Args: + ns: Namespace the PVC is in (default: default) + app: Application to unlock (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml) | kubectl apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} + - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl -n {{.ns}} logs job/{{.job}} --container minio + - kubectl -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-unlock-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml + silent: true + + # To run backup jobs in parallel for all replicationsources: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1' + snapshot: + desc: Snapshot a PVC for an application + summary: | + Args: + ns: Namespace the PVC is in (default: default) + app: Application to snapshot (required) + cmds: + - kubectl -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}' + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} + - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + env: *env + requires: + vars: ["app"] + vars: + now: '{{now | date "150405"}}' + ns: '{{.ns | default "default"}}' + job: volsync-src-{{.app}} + controller: + sh: true && {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - kubectl -n {{.ns}} get replicationsources {{.app}} + + # To run restore jobs in parallel for 
all replicationdestinations: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1' + restore: + desc: Restore a PVC for an application + summary: | + Args: + ns: Namespace the PVC is in (default: default) + app: Application to restore (required) + previous: Previous number of snapshots to restore (default: 2) + cmds: + - { task: .suspend, vars: *env } + - { task: .wipe, vars: *env } + - { task: .restore, vars: *env } + - { task: .resume, vars: *env } + env: *env + requires: + vars: ["app"] + vars: + ns: '{{.ns | default "default"}}' + previous: '{{.previous | default 2}}' + controller: + sh: "{{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}}" + claim: + sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}" + puid: + sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}" + pgid: + sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}" + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml + + cleanup: + desc: Delete volume populator PVCs in all namespaces + summary: | + Args: + cmds: + - for: { var: dest } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: cache } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: snaps } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }} + env: *env + vars: + dest: + sh: kubectl get pvc --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}' + cache: + sh: kubectl get pvc --all-namespaces --no-headers | grep "dst-cache" | awk '{print $1 "/" $2}' + snaps: + sh: kubectl get volumesnapshot --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}' + + # Suspend the Flux ks and hr + .suspend: + internal: true + cmds: + - flux -n flux-system suspend kustomization {{.app}} + - flux -n {{.ns}} suspend helmrelease {{.app}} + - kubectl -n {{.ns}} scale {{.controller}} --replicas 0 + - kubectl -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m + env: *env + + # Wipe the PVC of all data + .wipe: + internal: true + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml) | kubectl apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} + - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + - kubectl -n {{.ns}} logs job/{{.job}} --container main + - kubectl -n {{.ns}} delete job {{.job}} + env: *env + vars: + job: volsync-wipe-{{.app}} + + # Create VolSync replicationdestination CR to restore data + .restore: + internal: true + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml) | kubectl apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} + - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + - kubectl -n {{.ns}} delete replicationdestination {{.job}} + env: *env + vars: + job: volsync-dst-{{.app}} + + # Resume Flux ks and hr + .resume: + internal: true + cmds: + - flux -n {{.ns}} resume 
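The `cleanup` task above removes leftover volume-populator PVCs and snapshots whose names carry `dst-dest` or `dst-cache`. A read-only preview of what it would delete, using the same filters:

```bash
# PVCs left behind by VolSync volume population.
kubectl get pvc --all-namespaces --no-headers | grep -E "dst-(dest|cache)"

# Matching VolumeSnapshots.
kubectl get volumesnapshot --all-namespaces --no-headers | grep "dst-dest"
```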
helmrelease {{.app}} + - flux -n flux-system resume kustomization {{.app}} + env: *env diff --git a/.taskfiles/VolSync/scripts/wait-for-job.sh b/.taskfiles/VolSync/scripts/wait-for-job.sh new file mode 100644 index 00000000..8cb3fbe1 --- /dev/null +++ b/.taskfiles/VolSync/scripts/wait-for-job.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +JOB=$1 +NAMESPACE="${2:-default}" + +[[ -z "${JOB}" ]] && echo "Job name not specified" && exit 1 +while true; do + STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB}" -o jsonpath='{.items[*].status.phase}')" + if [ "${STATUS}" == "Pending" ]; then + break + fi + sleep 1 +done diff --git a/.taskfiles/VolSync/templates/list.tmpl.yaml b/.taskfiles/VolSync/templates/list.tmpl.yaml new file mode 100644 index 00000000..f538ab63 --- /dev/null +++ b/.taskfiles/VolSync/templates/list.tmpl.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: main + image: docker.io/restic/restic:0.16.4 + args: ["snapshots"] + envFrom: + - secretRef: + name: ${app}-volsync-secret + resources: {} diff --git a/.taskfiles/VolSync/templates/unlock.tmpl.yaml b/.taskfiles/VolSync/templates/unlock.tmpl.yaml new file mode 100644 index 00000000..fceac382 --- /dev/null +++ b/.taskfiles/VolSync/templates/unlock.tmpl.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: minio + image: docker.io/restic/restic:0.16.4 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: ${app}-volsync-secret + resources: {} diff --git a/.taskfiles/VolSync/templates/wipe.tmpl.yaml b/.taskfiles/VolSync/templates/wipe.tmpl.yaml new file mode 100644 index 00000000..9d6852e3 --- /dev/null +++ b/.taskfiles/VolSync/templates/wipe.tmpl.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: main + image: ghcr.io/onedr0p/alpine:rolling + command: ["/bin/sh", "-c", "cd /config; find . 
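Putting the VolSync tasks together: per the header comments, the app, its PVC and its ReplicationSource share one name (the taskfile's own example is `plex`). Example invocations under that assumption:

```bash
# Trigger an on-demand Restic backup of the app's PVC and wait for the mover job.
task volsync:snapshot app=plex ns=default

# Restore the PVC; `previous` is passed through to the ReplicationDestination
# template (default 2 per the task) and selects which snapshot is restored.
task volsync:restore app=plex ns=default previous=2

# Pause or resume the VolSync controller itself around maintenance windows.
task volsync:state-suspend
task volsync:state-resume
```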
-delete"] + volumeMounts: + - name: config + mountPath: /config + securityContext: + privileged: true + resources: {} + volumes: + - name: config + persistentVolumeClaim: + claimName: ${claim} diff --git a/.taskfiles/Workstation/Archfile b/.taskfiles/Workstation/Archfile new file mode 100644 index 00000000..b1ad3160 --- /dev/null +++ b/.taskfiles/Workstation/Archfile @@ -0,0 +1,17 @@ +age +cloudflared-bin +direnv +flux-bin +go-task +go-yq +helm +helmfile +jq +kubeconform +kubectl-bin +kustomize +moreutils +sops +stern-bin +talhelper-bin +talosctl diff --git a/.taskfiles/Workstation/Brewfile b/.taskfiles/Workstation/Brewfile new file mode 100644 index 00000000..0d31dc67 --- /dev/null +++ b/.taskfiles/Workstation/Brewfile @@ -0,0 +1,20 @@ +tap "fluxcd/tap" +tap "go-task/tap" +tap "siderolabs/talos" +brew "age" +brew "cloudflared" +brew "direnv" +brew "fluxcd/tap/flux" +brew "go-task/tap/go-task" +brew "helm" +brew "helmfile" +brew "jq" +brew "kubeconform" +brew "kubernetes-cli" +brew "kustomize" +brew "moreutils" +brew "sops" +brew "stern" +brew "talhelper" +brew "talosctl" +brew "yq" diff --git a/.taskfiles/Workstation/Taskfile.yaml b/.taskfiles/Workstation/Taskfile.yaml new file mode 100644 index 00000000..09f309f6 --- /dev/null +++ b/.taskfiles/Workstation/Taskfile.yaml @@ -0,0 +1,71 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + ARCHFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Archfile" + BREWFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Brewfile" + GENERIC_BIN_DIR: "{{.ROOT_DIR}}/.bin" + +tasks: + + direnv: + desc: Run direnv hooks + cmd: direnv allow . + status: + - "[[ $(direnv status --json | jq '.state.foundRC.allowed') == 0 ]]" + - "[[ $(direnv status --json | jq '.state.loadedRC.allowed') == 0 ]]" + + venv: + desc: Set up virtual environment + cmds: + - "{{.PYTHON_BIN}} -m venv {{.VIRTUAL_ENV}}" + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade pip setuptools wheel' + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.PIP_REQUIREMENTS_FILE}}"' + sources: + - "{{.PIP_REQUIREMENTS_FILE}}" + generates: + - "{{.VIRTUAL_ENV}}/pyvenv.cfg" + preconditions: + - { msg: "Missing Pip requirements file", sh: "test -f {{.PIP_REQUIREMENTS_FILE}}" } + + brew: + desc: Install workstation dependencies with Brew + cmd: brew bundle --file {{.BREWFILE}} + preconditions: + - { msg: "Missing Homebrew", sh: "command -v brew" } + - { msg: "Missing Brewfile", sh: "test -f {{.BREWFILE}}" } + + arch: + desc: Install Arch workstation dependencies with Paru Or Yay + cmd: "{{.helper}} -Syu --needed --noconfirm --noprogressbar $(cat {{.ARCHFILE}} | xargs)" + vars: + helper: + sh: "command -v yay || command -v paru" + preconditions: + - { msg: "Missing Archfile", sh: "test -f {{.ARCHFILE}}" } + + generic-linux: + desc: Install CLI tools into the projects .bin directory using curl + dir: "{{.GENERIC_BIN_DIR}}" + platforms: ["linux/amd64", "linux/arm64"] + cmds: + - for: + - budimanjojo/talhelper?as=talhelper&type=script + - cloudflare/cloudflared?as=cloudflared&type=script + - FiloSottile/age?as=age&type=script + - fluxcd/flux2?as=flux&type=script + - getsops/sops?as=sops&type=script + - helmfile/helmfile?as=helmfile&type=script + - jqlang/jq?as=jq&type=script + - kubernetes-sigs/kustomize?as=kustomize&type=script + - siderolabs/talos?as=talosctl&type=script + - yannh/kubeconform?as=kubeconform&type=script + - mikefarah/yq?as=yq&type=script + cmd: curl -fsSL "https://i.jpillora.com/{{.ITEM}}" | bash + - cmd: curl -LO 
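The Workstation taskfile above offers three install paths: Homebrew (Brewfile), Arch via paru/yay (Archfile), and curl-based installs into the repository's `.bin` directory. A typical first-time setup on a Homebrew-based machine, mirroring what the e2e workflow runs:

```bash
task workstation:brew     # brew bundle against .taskfiles/Workstation/Brewfile
task workstation:venv     # create .venv and install requirements.txt
task workstation:direnv   # direnv allow . for the repository's .envrc

# Alternatives for machines without Homebrew:
task workstation:arch           # Archfile via paru or yay
task workstation:generic-linux  # curl installers into ./.bin
```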
"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + platforms: ["linux/amd64"] + - cmd: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + platforms: ["linux/arm64"] + - cmd: chmod +x kubectl + - cmd: curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | USE_SUDO="false" HELM_INSTALL_DIR="." bash diff --git a/README.md b/README.md new file mode 100644 index 00000000..14766af4 --- /dev/null +++ b/README.md @@ -0,0 +1,108 @@ +
+ + + +# My Homelab automation Repository + +_... managed with FluxCD_ 🤖 + +
+ +--- + +## 🍼 Overview + +👋 Welcome to my Kubernetes Homelab Cluster repository! This project serves as a practical learning environment for +exploring Kubernetes and Infrastructure as Code (IaC) practices using tools like [FluxCD](https://fluxcd.io), +[Renovate](https://github.com/renovatebot/renovate), [go-task](https://github.com/go-task/task) and other + +## 📖 Table of contents + +- [🍼 Overview](#-overview) + - [📖 Table of contents](#-table-of-contents) + - [📚 Documentation](#-documentation) + - [🖥️ Technological Stack](#-technological-stack) + - [🔧 Hardware](#-hardware) + - [☁️ External Dependencies](#-external-dependencies) + - [🤖 Automation](#-automation) + - [🤝 Thanks](#-thanks) + +## 📚 Documentation + +## 🖥️ Technological Stack + +| | Name | Description | +|--------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------| +| | [Proxmox](https://www.proxmox.com) | Virtualization platform | +| | [Kubernetes](https://kubernetes.io/) | An open-source system for automating deployment, scaling, and management of containerized applications | +| | [Helm](https://helm.sh) | The Kubernetes package manager | +| | [FluxCD](https://fluxcd.io/) | GitOps tool for deploying applications to Kubernetes | +| | [Talos Linux](https://www.talos.dev/) | Talos Linux is Linux designed for Kubernetes | +| | [Cert Manager](https://cert-manager.io/) | X.509 certificate management for Kubernetes | +| | [Cilium](https://cilium.io/) | Internal Kubernetes container networking interface. | +| | [Ingress-nginx](https://github.com/kubernetes/ingress-nginx) | Kubernetes ingress controller using NGINX as a reverse proxy and load balancer. | +| | [Cloudflared](https://github.com/cloudflare/cloudflared) | Enables Cloudflare secure access to certain ingresses. | +| | [CoreDNS](https://coredns.io/) | Cluster DNS server | +| | [Spegel](https://github.com/spegel-org/spegel) | Stateless cluster local OCI registry mirror. | +| | [External-dns](https://github.com/kubernetes-sigs/external-dns/tree/master) | Automatically syncs ingress DNS records to a DNS provider. | +| | [External Secrets](https://github.com/external-secrets/external-secrets) | Managed Kubernetes secrets using [1Password Connect](https://github.com/1Password/connect). | +| | [Sops](https://github.com/getsops/sops) | Managed secrets for Kubernetes and which are commited to Git. | +| | [Longhorn](https://longhorn.io) | Cloud native distributed block storage for Kubernetes | +| | [VolSync](https://github.com/backube/volsync) | Backup and recovery of persistent volume claims. | +| | [Prometheus](https://prometheus.io) | Monitoring system and time series database | +| | [Thanos](https://thanos.io) | Highly available Prometheus setup with long-term storage capabilities | +| | [Grafana](https://grafana.com) | Data and logs visualization | +| | [Loki](https://grafana.com/oss/loki/) | Horizontally-scalable, highly-available, multi-tenant log aggregation system | +| | [Vector](https://github.com/vectordotdev/vector) | Collects, transform and routes logs to Loki | + + +## 🔧 Hardware + +
Rack photo
+ +| Device | Count | Disk Size | RAM | OS | Purpose | +|----------------------------|-------|-----------|------|---------|-------------------------| +| Lenovo M910Q Tiny i5-6500T | 3 | 256G | 32GB | Talos | Kubernetes Master Nodes | +| Raspberry Pi 5 | 1 | | 8GB | RpiOS | DNS, SmartHome | +| Synology RS422+ | 1 | 4x16TB | 2GB | DSM | NAS | +| UPS 5UTRA91227 | 1 | | | | UPS | +| UniFi UDM Pro | 1 | | | UnifiOS | Router | +| UniFi USW PRO 24 Gen2 | 1 | | | | Switch | +| UniFi USW Lite 8 | 1 | | | | Switch | +| UniFi U6 In-Wall | 1 | | | | Access Point | +| UniFi U6 Mesh | 1 | | | | Access Point | + +## ☁️ External Dependencies + +This list does not include cloud services that I use for personal reasons and don't yet want to migrate to self-hosted, +such as Google (Gmail, Photos, Drive), streaming services, Apple, and some applications. Legacy cloud services listed +at the bottom are remnants from previous attempts to set up smart home observability dashboards and will be migrated +and shut down ~~never~~ as soon as I have time to transfer all the configurations. + +| Service | Description | Costs | +|-------------------------------------------|------------------------------------------------------------------------------|------------------| +| [1Password](https://1password.com) | Secrets managements | 76$/year | +| [Cloudflare](https://www.cloudflare.com/) | Domain and DNS | Free | +| [GitHub](https://github.com/) | Repository Hosting | Free | +| [Discord](https://discord.com) | Notifications | Free | +| [Let's Encrypt](https://discord.com) | Certificates | Free | +| [Notifiarr](https://notifiarr.com) | Notifications push | 5$ one time | +| [AWS Route 53](https://aws.amazon.com/) | Domain | 0,5$/month | +| [AWS EC2 ](https://aws.amazon.com/) | (Legacy) Grafana, InfluxDB hosting for smart home analytics. Need to migrate | ~15$/month | +| [InfluxDB Cloud](https://aws.amazon.com/) | (Legacy) Smart home data storage. Need to migrate | ~14$/month | +| [AWS Other ](https://aws.amazon.com/) | (Legacy) Email hosting. Need to migrate | ~10$/month | +| | | Total: 45$/month | + + + +## 🤝 Thanks + +This project was mostly ~~copypasted from~~ inspired by a [onedr0p/home-ops](https://github.com/onedr0p/home-ops) +and [onedr0p/cluster-template](https://github.com/onedr0p/cluster-template) repositories. +A big thanks to the members of the [Home Operations](https://discord.gg/home-operations) community +for their support and for sharing their repositories. +Additional thanks to the [Kubesearch](https://kubesearch.dev/) project for ability to search for different configurations. 
diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 00000000..3f7dd6f5 --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,80 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + # Directories + ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible" + BOOTSTRAP_DIR: "{{.ROOT_DIR}}/bootstrap" + KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes" + PRIVATE_DIR: "{{.ROOT_DIR}}/.private" + SCRIPTS_DIR: "{{.ROOT_DIR}}/scripts" + # Files + AGE_FILE: "{{.ROOT_DIR}}/age.key" + BOOTSTRAP_CONFIG_FILE: "{{.ROOT_DIR}}/config.yaml" + KUBECONFIG_FILE: "{{.ROOT_DIR}}/kubeconfig" + MAKEJINJA_CONFIG_FILE: "{{.ROOT_DIR}}/makejinja.toml" + PIP_REQUIREMENTS_FILE: "{{.ROOT_DIR}}/requirements.txt" + # Binaries + PYTHON_BIN: python3 + +env: + KUBECONFIG: "{{.KUBECONFIG_FILE}}" + PYTHONDONTWRITEBYTECODE: "1" + SOPS_AGE_KEY_FILE: "{{.AGE_FILE}}" + VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv" + +includes: + kubernetes: + aliases: ["k8s"] + taskfile: .taskfiles/Kubernetes/Taskfile.yaml + flux: .taskfiles/Flux/Taskfile.yaml + repository: + aliases: ["repo"] + taskfile: .taskfiles/Repository/Taskfile.yaml + talos: .taskfiles/Talos/Taskfile.yaml + sops: .taskfiles/Sops/Taskfile.yaml + workstation: .taskfiles/Workstation/Taskfile.yaml + volsync: .taskfiles/VolSync/Taskfile.yaml + secrets: .taskfiles/ExternalSecrets/Taskfile.yaml + +tasks: + + default: task -l + + init: + desc: Initialize configuration files + cmds: + - mkdir -p {{.PRIVATE_DIR}} + - cp -n {{.BOOTSTRAP_CONFIG_FILE | replace ".yaml" ".sample.yaml"}} {{.BOOTSTRAP_CONFIG_FILE}} + - cmd: echo === Configuration file copied === + silent: true + - cmd: echo Proceed with updating the configuration files... + silent: true + - cmd: echo {{.BOOTSTRAP_CONFIG_FILE}} + silent: true + status: + - test -f "{{.BOOTSTRAP_CONFIG_FILE}}" + + configure: + desc: Configure repository from bootstrap vars + prompt: Any conflicting config in the root kubernetes and ansible directories will be overwritten... continue? 
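In practice the root Taskfile's `init` and `configure` targets are the entry point once the workstation tasks have run; a minimal sketch of that flow (the editor and the config values are yours):

```bash
task init                    # copies config.sample.yaml to config.yaml if absent
"${EDITOR:-vi}" config.yaml  # fill in the bootstrap_* values

# Renders the bootstrap templates with makejinja, encrypts *.sops.* files,
# then validates the rendered manifests with kubeconform.
task configure --yes
```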
+ deps: ["workstation:direnv", "workstation:venv", "sops:age-keygen", "init"] + cmds: + - task: .template + - task: sops:encrypt + - task: .validate + + .template: + internal: true + cmd: "{{.VIRTUAL_ENV}}/bin/makejinja" + preconditions: + - { msg: "Missing virtual environment", sh: "test -d {{.VIRTUAL_ENV}}" } + - { msg: "Missing Makejinja config file", sh: "test -f {{.MAKEJINJA_CONFIG_FILE}}" } + - { msg: "Missing Makejinja plugin file", sh: "test -f {{.BOOTSTRAP_DIR}}/scripts/plugin.py" } + - { msg: "Missing bootstrap config file", sh: "test -f {{.BOOTSTRAP_CONFIG_FILE}}" } + + .validate: + internal: true + cmds: + - task: kubernetes:kubeconform diff --git a/bootstrap/overrides/readme.partial.yaml.j2 b/bootstrap/overrides/readme.partial.yaml.j2 new file mode 100644 index 00000000..36dac44d --- /dev/null +++ b/bootstrap/overrides/readme.partial.yaml.j2 @@ -0,0 +1,5 @@ +<% Place user jinja template overrides in this file's directory %> +<% Docs: https://mirkolenz.github.io/makejinja/makejinja.html %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/makejinja.toml %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input1/not-empty.yaml.jinja %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input2/not-empty.yaml.jinja %> diff --git a/bootstrap/scripts/plugin.py b/bootstrap/scripts/plugin.py new file mode 100644 index 00000000..8944f38d --- /dev/null +++ b/bootstrap/scripts/plugin.py @@ -0,0 +1,63 @@ +import importlib.util +import sys +from collections.abc import Callable +from pathlib import Path +from typing import Any + +from typing import Any +from netaddr import IPNetwork + +import makejinja +import validation + + +def nthhost(value: str, query: int) -> str: + value = IPNetwork(value) + try: + nth = int(query) + if value.size > nth: + return str(value[nth]) + except ValueError: + return False + return value + + +def import_filter(file: Path) -> Callable[[dict[str, Any]], bool]: + module_path = file.relative_to(Path.cwd()).with_suffix("") + module_name = str(module_path).replace("/", ".") + spec = importlib.util.spec_from_file_location(module_name, file) + assert spec is not None + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + assert spec.loader is not None + spec.loader.exec_module(module) + return module.main + + +class Plugin(makejinja.plugin.Plugin): + def __init__(self, data: dict[str, Any], config: makejinja.config.Config): + self._data = data + self._config = config + + self._excluded_dirs: set[Path] = set() + for input_path in config.inputs: + for filter_file in input_path.rglob(".mjfilter.py"): + filter_func = import_filter(filter_file) + if filter_func(data) is False: + self._excluded_dirs.add(filter_file.parent) + + validation.validate(data) + + + def filters(self) -> makejinja.plugin.Filters: + return [nthhost] + + + def path_filters(self): + return [self._mjfilter_func] + + + def _mjfilter_func(self, path: Path) -> bool: + return not any( + path.is_relative_to(excluded_dir) for excluded_dir in self._excluded_dirs + ) diff --git a/bootstrap/scripts/validation.py b/bootstrap/scripts/validation.py new file mode 100644 index 00000000..b3a75a07 --- /dev/null +++ b/bootstrap/scripts/validation.py @@ -0,0 +1,113 @@ +from functools import wraps +from shutil import which +from typing import Callable, cast +from zoneinfo import available_timezones +import netaddr +import re +import socket +import sys + +GLOBAL_CLI_TOOLS = ["age", "flux", "helmfile", "sops", "jq", 
"kubeconform", "kustomize", "talosctl", "talhelper"] +CLOUDFLARE_TOOLS = ["cloudflared"] + + +def required(*keys: str): + def wrapper_outter(func: Callable): + @wraps(func) + def wrapper(data: dict, *_, **kwargs) -> None: + for key in keys: + if data.get(key) is None: + raise ValueError(f"Missing required key {key}") + return func(*[data[key] for key in keys], **kwargs) + + return wrapper + + return wrapper_outter + + +def validate_python_version() -> None: + required_version = (3, 11, 0) + if sys.version_info < required_version: + raise ValueError(f"Python {sys.version_info} is below 3.11. Please upgrade.") + + +def validate_ip(ip: str) -> str: + try: + netaddr.IPAddress(ip) + except netaddr.core.AddrFormatError as e: + raise ValueError(f"Invalid IP address {ip}") from e + return ip + + +def validate_network(cidr: str, family: int) -> str: + try: + network = netaddr.IPNetwork(cidr) + if network.version != family: + raise ValueError(f"Invalid CIDR family {network.version}") + except netaddr.core.AddrFormatError as e: + raise ValueError(f"Invalid CIDR {cidr}") from e + return cidr + + +def validate_node(node: dict, node_cidr: str) -> None: + if not node.get("name"): + raise ValueError(f"A node is missing a name") + if not re.match(r"^[a-z0-9-]+$", node.get('name')): + raise ValueError(f"Node {node.get('name')} has an invalid name") + if not node.get("disk"): + raise ValueError(f"Node {node.get('name')} is missing disk") + if not node.get("mac_addr"): + raise ValueError(f"Node {node.get('name')} is missing mac_addr") + if not re.match(r"(?:[0-9a-fA-F]:?){12}", node.get("mac_addr")): + raise ValueError(f"Node {node.get('name')} has an invalid mac_addr, is this a MAC address?") + if node.get("address"): + ip = validate_ip(node.get("address")) + if netaddr.IPAddress(ip, 4) not in netaddr.IPNetwork(node_cidr): + raise ValueError(f"Node {node.get('name')} is not in the node CIDR {node_cidr}") + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.settimeout(5) + result = sock.connect_ex((ip, 50000)) + if result != 0: + raise ValueError(f"Node {node.get('name')} port 50000 is not open") + + +@required("bootstrap_cloudflare") +def validate_cli_tools(cloudflare: dict, **_) -> None: + for tool in GLOBAL_CLI_TOOLS: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + for tool in CLOUDFLARE_TOOLS if cloudflare.get("enabled", False) else []: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + + +@required("bootstrap_sops_age_pubkey") +def validate_age(key: str, **_) -> None: + if not re.match(r"^age1[a-z0-9]{0,58}$", key): + raise ValueError(f"Invalid Age public key {key}") + + +@required("bootstrap_node_network", "bootstrap_node_inventory") +def validate_nodes(node_cidr: str, nodes: dict[list], **_) -> None: + node_cidr = validate_network(node_cidr, 4) + + controllers = [node for node in nodes if node.get('controller') == True] + if len(controllers) < 1: + raise ValueError(f"Must have at least one controller node") + if len(controllers) % 2 == 0: + raise ValueError(f"Must have an odd number of controller nodes") + for node in controllers: + validate_node(node, node_cidr) + + workers = [node for node in nodes if node.get('controller') == False] + for node in workers: + validate_node(node, node_cidr) + + +def validate(data: dict) -> None: + validate_python_version() + validate_cli_tools(data) + validate_age(data) + + if not data.get("skip_tests", False): + validate_nodes(data) diff --git a/bootstrap/templates/.sops.yaml.j2 
b/bootstrap/templates/.sops.yaml.j2 new file mode 100644 index 00000000..cb7aa764 --- /dev/null +++ b/bootstrap/templates/.sops.yaml.j2 @@ -0,0 +1,12 @@ +--- +creation_rules: + - # IMPORTANT: This rule MUST be above the others + path_regex: talos/.*\.sops\.ya?ml + key_groups: + - age: + - "#{ bootstrap_sops_age_pubkey }#" + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" + key_groups: + - age: + - "#{ bootstrap_sops_age_pubkey }#" diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..fb668ce6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.14.5 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + installCRDs: true + dns01RecursiveNameservers: 1.1.1.1:53,9.9.9.9:53 + dns01RecursiveNameserversOnly: true + podDnsPolicy: None + podDnsConfig: + nameservers: + - "1.1.1.1" + - "9.9.9.9" + prometheus: + enabled: true + servicemonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 new file mode 100644 index 00000000..23df7724 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- + +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py new file mode 100644 index 00000000..d9ae82b4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 new file mode 100644 index 00000000..1cf7148a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 @@ -0,0 +1,39 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-production + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + dnsZones: + - "${SECRET_DOMAIN}" +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + 
dnsZones: + - "${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 new file mode 100644 index 00000000..fd43d965 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./issuers.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 new file mode 100644 index 00000000..f5bf887f --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-manager-secret +stringData: + api-token: "#{ bootstrap_cloudflare.token }#" diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 new file mode 100644 index 00000000..bd2f7357 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +#% if bootstrap_cloudflare.enabled %# +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager-issuers + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager + path: ./kubernetes/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 new file mode 100644 index 00000000..abbe7755 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cert-manager/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 new file mode 100644 index 00000000..ed788350 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + kustomize.toolkit.fluxcd.io/prune: disabled 
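Editor's note: the `.mjfilter.py` added above for the cert-manager issuers directory is what ties the Cloudflare toggle back to `bootstrap/scripts/plugin.py` — the plugin walks the template inputs for every `.mjfilter.py`, calls its `main(data)` with the bootstrap config, and excludes the containing directory from rendering when the predicate returns False. The snippet below is a minimal standalone sketch of that behaviour only; it deliberately avoids the real makejinja API, and the paths and sample data are illustrative.

```python
# Hypothetical, simplified stand-in for the path-filter logic in
# bootstrap/scripts/plugin.py -- it does not import the real makejinja API.
from pathlib import Path

def issuers_mjfilter(data: dict) -> bool:
    # Same predicate as cert-manager/cert-manager/issuers/.mjfilter.py
    return data.get("bootstrap_cloudflare", {}).get("enabled", False) is True

def should_render(template: Path, data: dict, filters: dict) -> bool:
    """Exclude a template when any parent directory's .mjfilter.py predicate is False."""
    excluded = {directory for directory, predicate in filters.items() if not predicate(data)}
    return not any(template.is_relative_to(directory) for directory in excluded)

if __name__ == "__main__":
    filters = {Path("kubernetes/apps/cert-manager/cert-manager/issuers"): issuers_mjfilter}
    template = Path("kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2")
    print(should_render(template, {"bootstrap_cloudflare": {"enabled": False}}, filters))  # False
    print(should_render(template, {"bootstrap_cloudflare": {"enabled": True}}, filters))   # True
```

Flipping `enabled` to True mirrors how setting `bootstrap_cloudflare.enabled` in `config.yaml` causes the issuers manifests to be generated at all.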
diff --git a/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 new file mode 100644 index 00000000..29d0612d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./webhooks/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 new file mode 100644 index 00000000..b48db452 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 new file mode 100644 index 00000000..e704eed3 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 @@ -0,0 +1,25 @@ +#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: flux-webhook + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" +spec: + ingressClassName: external + rules: + - host: &host "flux-webhook.${SECRET_DOMAIN}" + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 + tls: + - hosts: + - *host +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 new file mode 100644 index 00000000..b40a47d6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + #% if bootstrap_cloudflare.enabled %# + - ./ingress.yaml + #% endif %# + - ./receiver.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 new file mode 100644 index 00000000..cca5931b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: github-receiver +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 new file mode 100644 index 00000000..34ac7daf --- /dev/null +++ 
b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-webhook-token-secret +stringData: + token: "#{ bootstrap_github_webhook_token }#" diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 new file mode 100644 index 00000000..08c1780f --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 new file mode 100644 index 00000000..72081666 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flux-webhooks + namespace: flux-system +spec: + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/flux-system/webhooks/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 new file mode 100644 index 00000000..7e15b6f6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 @@ -0,0 +1,37 @@ +--- +# https://docs.cilium.io/en/latest/network/bgp-control-plane/ +apiVersion: cilium.io/v2alpha1 +kind: CiliumBGPPeeringPolicy +metadata: + name: policy +spec: + nodeSelector: + matchLabels: + kubernetes.io/os: linux + virtualRouters: + - localASN: #{ bootstrap_bgp.local_asn }# + neighbors: + #% if bootstrap_bgp.peers %# + #% for item in bootstrap_bgp.peers %# + - peerAddress: "#{ item }#/32" + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endfor %# + #% else %# + #% if bootstrap_node_default_gateway %# + - peerAddress: "#{ bootstrap_node_default_gateway }#/32" + #% else %# + - peerAddress: "#{ bootstrap_node_network | nthhost(1) }#/32" + #% endif %# + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endif %# + serviceSelector: + matchExpressions: + - {key: somekey, operator: NotIn, values: ['never-used-value']} +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: pool +spec: + cidrs: + - cidr: "${BGP_ADVERTISED_CIDR}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 new file mode 100644 index 00000000..caa35cab --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 @@ -0,0 +1,22 @@ +--- +# https://docs.cilium.io/en/latest/network/l2-announcements +apiVersion: cilium.io/v2alpha1 +kind: CiliumL2AnnouncementPolicy +metadata: + name: policy +spec: + loadBalancerIPs: true + # NOTE: This might need to be set if you have more than one active NIC on your hosts + # interfaces: + # - ^eno[0-9]+ + nodeSelector: 
+ matchLabels: + kubernetes.io/os: linux +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: pool +spec: + cidrs: + - cidr: "${NODE_CIDR}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..a7869100 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 @@ -0,0 +1,26 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cilium +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.15.5 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + #% filter indent(width=4, first=True) %# + #% include 'partials/cilium-values-full.partial.yaml.j2' %# + #% endfilter %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 new file mode 100644 index 00000000..8829dd64 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + #% if bootstrap_bgp.enabled %# + - ./cilium-bgp.yaml + #% endif %# + #% if ((not bootstrap_bgp.enabled) and (not bootstrap_feature_gates.dual_stack_ipv4_first)) %# + - ./cilium-l2.yaml + #% endif %# + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 new file mode 100644 index 00000000..9df69105 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/cilium/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py new file mode 100644 index 00000000..3ace63df --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..87572093 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 @@ -0,0 +1,30 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.2.1 + sourceRef: 
+ kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + #% filter indent(width=4, first=True) %# + #% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# + #% endfilter %# + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 new file mode 100644 index 00000000..f43156a8 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kubelet-csr-approver + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kubelet-csr-approver/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 new file mode 100644 index 00000000..b4203ca2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cilium/ks.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./kubelet-csr-approver/ks.yaml + #% endif %# + - ./metrics-server/ks.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./spegel/ks.yaml + #% endif %# + - ./reloader/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..ff9e8e0d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: metrics-server +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.12.1 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + args: + - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true 
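Editor's note: these templates rely on makejinja's non-default Jinja delimiters — `#% ... %#` for blocks, `#{ ... }#` for variables, and `<% ... %>` for comments, as seen in the readme partial earlier. The delimiter configuration itself lives in `makejinja.toml`, which is not part of this diff, so the following is only a hedged, self-contained Jinja2 sketch that assumes those delimiter choices in order to show how a conditional such as the kube-system kustomization's `#% if bootstrap_distribution in ["talos"] %#` renders.

```python
# Standalone sketch: render a fragment of a .j2 template with the custom delimiters
# used throughout this diff. Assumption: the real delimiters are set in makejinja.toml,
# which is not shown here.
from jinja2 import Environment

env = Environment(
    block_start_string="#%",
    block_end_string="%#",
    variable_start_string="#{",
    variable_end_string="}#",
    comment_start_string="<%",
    comment_end_string="%>",
    trim_blocks=True,
    lstrip_blocks=True,
)

template = env.from_string(
    "resources:\n"
    "  - ./namespace.yaml\n"
    "  - ./cilium/ks.yaml\n"
    "#% if bootstrap_distribution in ['talos'] %#\n"
    "  - ./kubelet-csr-approver/ks.yaml\n"
    "#% endif %#\n"
)
print(template.render(bootstrap_distribution="talos"))
```

Rendering with `bootstrap_distribution="k3s"` instead drops the kubelet-csr-approver entry, which is exactly the branching the kube-system kustomization template encodes.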
diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 new file mode 100644 index 00000000..6a21d99c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app metrics-server + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 new file mode 100644 index 00000000..5eeb2c91 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..f5cd4317 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 @@ -0,0 +1,29 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: reloader +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.0.97 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: reloader + reloader: + readOnlyRootFileSystem: true + podMonitor: + enabled: true + namespace: "{{ .Release.Namespace }}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 new file mode 100644 index 00000000..0aae5261 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reloader + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py new file mode 100644 index 00000000..3ace63df --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..d03f9c5d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: spegel +spec: + interval: 30m + chart: + spec: + chart: spegel + version: v0.0.18 + sourceRef: + kind: HelmRepository + name: xenitab + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts + service: + registry: + hostPort: 29999 + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 new file mode 100644 index 00000000..8f129bd6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app spegel + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/spegel/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/.mjfilter.py b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py new file mode 100644 index 00000000..d9ae82b4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 new file mode 100644 index 00000000..05bcef5c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: "external.${SECRET_DOMAIN}" + +ingress: + - hostname: "${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 new file mode 100644 index 00000000..43d7d7b2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: cloudflared +spec: + endpoints: + - dnsName: "external.${SECRET_DOMAIN}" + recordType: CNAME + targets: ["${SECRET_CLOUDFLARE_TUNNEL_ID}.cfargotunnel.com"] diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..562fbfad --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 @@ -0,0 +1,113 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + cloudflared: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.5.0 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + TUNNEL_ID: + valueFrom: + secretKeyRef: + name: cloudflared-secret + key: TUNNEL_ID + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "$(TUNNEL_ID)" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-secret + globalMounts: + - path: 
/etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 new file mode 100644 index 00000000..37b1f4e4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dnsendpoint.yaml + - ./secret.sops.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflared-configmap + files: + - ./configs/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 new file mode 100644 index 00000000..67d169ed --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudflared-secret +stringData: + TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + credentials.json: | + { + "AccountTag": "#{ bootstrap_cloudflare.tunnel.account_id }#", + "TunnelSecret": "#{ bootstrap_cloudflare.tunnel.secret }#", + "TunnelID": "#{ bootstrap_cloudflare.tunnel.id }#" + } diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 new file mode 100644 index 00000000..deb7873e --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudflared + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-dns + path: ./kubernetes/apps/network/cloudflared/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..0993f6cd --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 @@ -0,0 +1,95 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: echo-server +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + echo-server: + strategy: RollingUpdate + containers: + app: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: 33 + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: true + LOG_IGNORE_PATH: /healthz + PROMETHEUS_ENABLED: true + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + 
allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + resources: + requests: + cpu: 10m + limits: + memory: 64Mi + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + service: + app: + controller: echo-server + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: echo-server + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: &host "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + tls: + - hosts: + - *host diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 new file mode 100644 index 00000000..0cfc7559 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app echo-server + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/echo-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 new file mode 100644 index 00000000..9254f89d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 @@ -0,0 +1,93 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007" + creationTimestamp: null + name: dnsendpoints.externaldns.k8s.io +spec: + group: externaldns.k8s.io + names: + kind: DNSEndpoint + listKind: DNSEndpointList + plural: dnsendpoints + singular: dnsendpoint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DNSEndpointSpec defines the desired state of DNSEndpoint + properties: + endpoints: + items: + description: Endpoint is a high-level way of a connection between a service and an IP + properties: + dnsName: + description: The hostname of the DNS record + type: string + labels: + additionalProperties: + type: string + description: Labels stores labels defined for the Endpoint + type: object + providerSpecific: + description: ProviderSpecific stores provider specific config + items: + description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers + properties: + name: + type: string + value: + type: string + type: object + type: array + recordTTL: + description: TTL for the record + format: int64 + type: integer + recordType: + description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc + type: string + setIdentifier: + description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple') + type: string + targets: + description: The targets the DNS record points to + items: + type: string + type: array + type: object + type: array + type: object + status: + description: DNSEndpointStatus defines the observed state of DNSEndpoint + properties: + observedGeneration: + description: The generation observed by the external-dns controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..8a6e5d7e --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 @@ -0,0 +1,45 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.14.4 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: *app + provider: cloudflare + env: + - name: CF_API_TOKEN + valueFrom: + secretKeyRef: + name: external-dns-secret + key: api-token + extraArgs: + - --ingress-class=external + - --cloudflare-proxied + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + policy: sync + sources: ["crd", "ingress"] + txtPrefix: k8s. 
+ txtOwnerId: default + domainFilters: ["${SECRET_DOMAIN}"] + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: external-dns-secret diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 new file mode 100644 index 00000000..406c4615 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dnsendpoint-crd.yaml + - ./secret.sops.yaml + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 new file mode 100644 index 00000000..c067b329 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: external-dns-secret +stringData: + api-token: "#{ bootstrap_cloudflare.token }#" diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 new file mode 100644 index 00000000..5c554ef1 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/external-dns/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 new file mode 100644 index 00000000..87bf7948 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./staging.yaml + #% if bootstrap_cloudflare.acme.production %# + - ./production.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 new file mode 100644 index 00000000..b5afdf41 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-production" +spec: + secretName: "${SECRET_DOMAIN/./-}-production-tls" + issuerRef: + name: letsencrypt-production + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 new file mode 100644 index 00000000..9c869425 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-staging" +spec: + secretName: "${SECRET_DOMAIN/./-}-staging-tls" + issuerRef: + name: letsencrypt-staging + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 new file mode 100644 index 00000000..60b83c6b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 @@ -0,0 +1,91 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: ingress-nginx-external +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.10.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + dependsOn: + - name: cloudflared + namespace: network + values: + fullnameOverride: ingress-nginx-external + controller: + replicaCount: 1 + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "external.${SECRET_DOMAIN}" + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.tunnel.ingress_vip }#" + externalTrafficPolicy: Cluster + ingressClassResource: + name: external + default: false + controllerValue: k8s.io/external + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["external"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-external + app.kubernetes.io/component: controller + resources: + requests: + cpu: 100m + limits: + memory: 500Mi + defaultBackend: + enabled: false diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 new file mode 100644 index 00000000..045eed32 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 @@ -0,0 +1,88 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: ingress-nginx-internal + namespace: network +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.10.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: ingress-nginx-internal + controller: + replicaCount: 1 + service: + annotations: + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.ingress_vip }#" + externalTrafficPolicy: Cluster + ingressClassResource: + name: internal + default: true + controllerValue: k8s.io/internal + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["internal"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-internal + app.kubernetes.io/component: controller + resources: + requests: + cpu: 100m + limits: + memory: 500Mi + defaultBackend: + enabled: false diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 @@ -0,0 
+1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 new file mode 100644 index 00000000..4121eab5 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 @@ -0,0 +1,69 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-certificates + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager-issuers + path: ./kubernetes/apps/network/ingress-nginx/certificates + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-internal + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/internal + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-external + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/external + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..b8e7db69 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: k8s-gateway +spec: + interval: 30m + chart: + spec: + chart: k8s-gateway + version: 2.4.0 + sourceRef: + kind: HelmRepository + name: k8s-gateway + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: k8s-gateway + domain: "${SECRET_DOMAIN}" + ttl: 1 + service: + type: LoadBalancer + port: 53 + annotations: + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.gateway_vip }#" + externalTrafficPolicy: Cluster diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 @@ 
-0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 new file mode 100644 index 00000000..6709e768 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app k8s-gateway + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/k8s-gateway/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 new file mode 100644 index 00000000..2dc9a0db --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cloudflared/ks.yaml + - ./echo-server/ks.yaml + - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml + - ./k8s-gateway/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 new file mode 100644 index 00000000..4d78d7b1 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/.mjfilter.py new file mode 100644 index 00000000..0979f9a6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 new file mode 100644 index 00000000..9afab41b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./plan.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 new file mode 100644 index 00000000..5412ea57 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 @@ -0,0 +1,50 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: controllers +spec: + version: "${KUBE_VERSION}" + upgrade: + image: rancher/k3s-upgrade + serviceAccountName: system-upgrade + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + 
operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/etcd + effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: workers +spec: + version: "${KUBE_VERSION}" + serviceAccountName: system-upgrade + concurrency: 1 + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + prepare: + image: rancher/k3s-upgrade + args: ["prepare", "server"] + upgrade: + image: rancher/k3s-upgrade diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 new file mode 100644 index 00000000..4c7c55a4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-k3s + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/apps/system-upgrade/k3s/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=github-releases depName=k3s-io/k3s + KUBE_VERSION: v1.30.0+k3s1 diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 new file mode 100644 index 00000000..dd2adbef --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + #% if bootstrap_distribution in ['k3s', 'talos'] %# + - ./system-upgrade-controller/ks.yaml + #% endif %# + #% if bootstrap_distribution in ["k3s"] %# + - ./k3s/ks.yaml + #% endif %# + #% if bootstrap_distribution in ["talos"] and bootstrap_talos.schematic_id %# + - ./talos/ks.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 new file mode 100644 index 00000000..5ea024dd --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/.mjfilter.py new file mode 100644 index 00000000..394f9d1e --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s", "talos"] diff --git 
a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..d5f72848 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 @@ -0,0 +1,102 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app system-upgrade-controller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + system-upgrade-controller: + strategy: RollingUpdate + containers: + app: + image: + repository: docker.io/rancher/system-upgrade-controller + tag: v0.13.4 + env: + SYSTEM_UPGRADE_CONTROLLER_DEBUG: false + SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 + SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 + SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 + SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent + SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.30.1 + SYSTEM_UPGRADE_JOB_PRIVILEGED: true + SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 + SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m + SYSTEM_UPGRADE_CONTROLLER_NAME: *app + SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccount: + create: true + name: system-upgrade + persistence: + tmp: + type: emptyDir + globalMounts: + - path: /tmp + etc-ssl: + type: hostPath + hostPath: /etc/ssl + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ssl + readOnly: true + etc-pki: + type: hostPath + hostPath: /etc/pki + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/pki + readOnly: true + etc-ca-certificates: + type: hostPath + hostPath: /etc/ca-certificates + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ca-certificates + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 new file mode 100644 index 00000000..b27bf573 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # renovate: datasource=github-releases depName=rancher/system-upgrade-controller + - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.4/crd.yaml + - helmrelease.yaml + - rbac.yaml diff --git 
a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 new file mode 100644 index 00000000..ddc6127f --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 @@ -0,0 +1,23 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-upgrade +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: system-upgrade + namespace: system-upgrade +#% if bootstrap_distribution in ["talos"] %# +--- +apiVersion: talos.dev/v1alpha1 +kind: ServiceAccount +metadata: + name: talos +spec: + roles: + - os:admin +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 new file mode 100644 index 00000000..212eccec --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/talos/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/.mjfilter.py new file mode 100644 index 00000000..82712ee6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/.mjfilter.py @@ -0,0 +1,4 @@ +main = lambda data: ( + data.get("bootstrap_distribution", "k3s") in ["talos"] and + data.get("talos", {}).get("schematic_id", {}) +) diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/kustomization.yaml.j2 new file mode 100644 index 00000000..9afab41b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./plan.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/plan.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/plan.yaml.j2 new file mode 100644 index 00000000..88228c88 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/app/plan.yaml.j2 @@ -0,0 +1,93 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: kubernetes +spec: + version: "${KUBE_VERSION}" + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + exclusive: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + prepare: &prepare + image: "ghcr.io/siderolabs/talosctl:${SYSTEM_VERSION}" + envs: + - name: NODE_IP + valueFrom: + fieldRef: + 
fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=false + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade-k8s + - --to=$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: talos +spec: + version: "${SYSTEM_VERSION}" + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + cordon: true + drain: + deleteLocalData: true + disableEviction: false + ignoreDaemonSets: true + exclusive: true + nodeSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: ["linux"] + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/controlplane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/etcd + operator: Exists + effect: NoSchedule + prepare: &prepare + image: ghcr.io/siderolabs/talosctl + envs: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=false + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade + - --image=factory.talos.dev/installer/#{ bootstrap_talos.schematic_id }#:$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) + - --preserve=true + - --wait=false diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/talos/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/ks.yaml.j2 new file mode 100644 index 00000000..7dd32833 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/talos/ks.yaml.j2 @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-talos + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/apps/system-upgrade/talos/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet + KUBE_VERSION: v1.30.1 + # renovate: datasource=docker depName=ghcr.io/siderolabs/installer + SYSTEM_VERSION: v1.7.2 diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 new file mode 100644 index 00000000..0ef1f6e8 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 @@ -0,0 +1,17 @@ +#% if bootstrap_github_private_key %# +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-deploy-key + namespace: flux-system +stringData: + identity: | + #% filter indent(width=4, first=False) %# + #{ bootstrap_github_private_key }# + #%- endfilter %# + known_hosts: | + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= +#% endif %# diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 new file mode 100644 index 00000000..1d9ad47f --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 @@ -0,0 +1,62 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components and CRDs into your cluster. +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.3.0 +patches: + # Remove the default network policies + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 new file mode 100644 index 00000000..f97743bd --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 @@ -0,0 +1,59 @@ +--- +repositories: + - name: cilium + url: https://helm.cilium.io + - name: coredns + url: https://coredns.github.io/helm + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + recreatePods: true + force: true + +releases: + - name: prometheus-operator-crds + namespace: observability + chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds + version: 11.0.0 + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.5 + values: + - ../../apps/kube-system/cilium/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - name: coredns + namespace: kube-system + chart: coredns/coredns + version: 1.29.0 + values: + - ../../apps/kube-system/coredns/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + 
- kube-system/cilium + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.2.1 + values: + - ../../apps/kube-system/kubelet-csr-approver/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - name: spegel + namespace: kube-system + chart: oci://ghcr.io/spegel-org/helm-charts/spegel + version: v0.0.22 + values: + - ../../apps/kube-system/spegel/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 new file mode 100644 index 00000000..3dff2814 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 @@ -0,0 +1,251 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json +--- +# renovate: datasource=docker depName=ghcr.io/siderolabs/installer +talosVersion: v1.7.2 +# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet +kubernetesVersion: v1.30.1 + +clusterName: "#{ bootstrap_cluster_name | default('home-kubernetes', true) }#" +endpoint: https://#{ bootstrap_controller_vip }#:6443 +clusterPodNets: + - "#{ bootstrap_pod_network.split(',')[0] }#" +clusterSvcNets: + - "#{ bootstrap_service_network.split(',')[0] }#" +additionalApiServerCertSans: &sans + - "#{ bootstrap_controller_vip }#" + - 127.0.0.1 # KubePrism + #% for item in bootstrap_tls_sans %# + - "#{ item }#" + #% endfor %# +additionalMachineCertSans: *sans + +# Disable built-in Flannel to use Cilium +cniConfig: + name: none + +nodes: + #% for item in bootstrap_node_inventory %# + - hostname: "#{ item.name }#" + ipAddress: "#{ item.address }#" + #% if item.disk.startswith('/') %# + installDisk: "#{ item.disk }#" + #% else %# + installDiskSelector: + serial: "#{ item.disk }#" + #% endif %# + #% if bootstrap_secureboot.enabled %# + machineSpec: + secureboot: true + talosImageURL: factory.talos.dev/installer-secureboot/#{ bootstrap_schematic_id }# + #% else %# + talosImageURL: factory.talos.dev/installer/#{ bootstrap_schematic_id }# + #% endif %# + controlPlane: #{ (item.controller) | string | lower }# + networkInterfaces: + - deviceSelector: + hardwareAddr: "#{ item.mac_addr | lower }#" + #% if bootstrap_vlan %# + vlans: + - vlanId: #{ bootstrap_vlan }# + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + mtu: #{ item.mtu | default(1500) }# + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controller_vip }#" + #% endif %# + #% else %# + #% if item.address %# + dhcp: false + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% else %# + dhcp: true + #% endif %# + mtu: #{ item.mtu | default(1500) }# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controller_vip }#" + #% endif %# + #% endif %# + #% if bootstrap_user_patches %# + patches: + - "@./patches/node_#{ item.name }#.yaml" 
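+      # Note: when bootstrap_user_patches is enabled, this per-node patch file
+      # must already exist so that talos:bootstrap-genconfig can complete
+      # (an empty file is fine).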
+ #% endif %# + #% endfor %# + +patches: + # Configure containerd + - |- + machine: + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: |- + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + + # Disable search domain everywhere + - |- + machine: + network: + disableSearchDomain: true + + # Enable cluster discovery + - |- + cluster: + discovery: + registries: + kubernetes: + disabled: false + service: + disabled: false + + # Configure kubelet + - |- + machine: + kubelet: + extraArgs: + rotate-server-certificates: true + nodeIP: + validSubnets: + - #{ bootstrap_node_network }# + + #% if bootstrap_dns_servers | length %# + # Force nameserver + - |- + machine: + network: + nameservers: + #% for item in bootstrap_dns_servers %# + - #{ item }# + #% endfor %# + #% endif %# + + #% if bootstrap_ntp_servers | length %# + # Configure NTP + - |- + machine: + time: + disabled: false + servers: + #% for item in bootstrap_ntp_servers %# + - #{ item }# + #% endfor %# + #% endif %# + + # Custom sysctl settings + - |- + machine: + sysctls: + fs.inotify.max_queued_events: "65536" + fs.inotify.max_user_watches: "524288" + fs.inotify.max_user_instances: "8192" + net.core.rmem_max: "2500000" + net.core.wmem_max: "2500000" + + # Mount openebs-hostpath in kubelet + - |- + machine: + kubelet: + extraMounts: + - destination: /var/openebs/local + type: bind + source: /var/openebs/local + options: + - bind + - rshared + - rw + + #% if bootstrap_secureboot.enabled and bootstrap_secureboot.encrypt_disk_with_tpm %# + # Encrypt system disk with TPM + - |- + machine: + systemDiskEncryption: + ephemeral: + provider: luks2 + keys: + - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} + #% endif %# + + #% if bootstrap_user_patches %# + # User specified global patches + - "@./patches/global.yaml" + #% endif %# + +controlPlane: + patches: + # Cluster configuration + - |- + cluster: + allowSchedulingOnControlPlanes: true + controllerManager: + extraArgs: + bind-address: 0.0.0.0 + coreDNS: + disabled: true + proxy: + disabled: true + scheduler: + extraArgs: + bind-address: 0.0.0.0 + + # ETCD configuration + - |- + cluster: + etcd: + extraArgs: + listen-metrics-urls: http://0.0.0.0:2381 + advertisedSubnets: + - #{ bootstrap_node_network }# + + # Disable default API server admission plugins. 
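+    # Unlike the strategic-merge patches above, the entry below is an
+    # RFC 6902 (JSON) patch; Talos accepts both formats, and a JSON "remove"
+    # op is what allows the default admissionControl list to be dropped
+    # entirely rather than merely overridden.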
+ - |- + - op: remove + path: /cluster/apiServer/admissionControl + + # Enable K8s Talos API Access + - |- + machine: + features: + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade + #% if bootstrap_user_patches %# + # User specified controlPlane patches + - "@./patches/controlPlane.yaml" + #% endif %# +#% if ((bootstrap_user_patches) and (bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length)) %# +worker: + patches: + # User specified worker patches + - "@./patches/worker.yaml" +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/apps.yaml.j2 b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 new file mode 100644 index 00000000..6d260916 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: cluster-settings-user + optional: true + - kind: Secret + name: cluster-secrets-user + optional: true + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: cluster-settings-user + optional: true + - kind: Secret + name: cluster-secrets-user + optional: true + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 new file mode 100644 index 00000000..06057f4f --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/gitrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + url: "#{ bootstrap_github_address }#" + #% if bootstrap_github_private_key %# + secretRef: + name: github-deploy-key + #% endif %# + ref: + branch: "#{ bootstrap_github_branch|default('main', true) }#" + ignore: | + # exclude all + /* + # include kubernetes directory + !/kubernetes +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets diff --git a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 
b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 new file mode 100644 index 00000000..fb1f3f7f --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 @@ -0,0 +1,88 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.3.0 +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + # Remove the network policies + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Increase the number of reconciliations that can be performed in parallel and bump the resources limits + # https://fluxcd.io/flux/cheatsheets/bootstrap/#increase-the-number-of-workers + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=8 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-qps=500 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-burst=1000 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: not-used + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + cpu: 2000m + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # Enable Helm near OOM detection + # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller diff --git a/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 new file mode 100644 index 00000000..2ff3c784 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 new file mode 100644 index 00000000..8fb7c142 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git 
a/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 new file mode 100644 index 00000000..c32ccd8d --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/bjw-s/helm diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 new file mode 100644 index 00000000..d6736ba4 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 1h + url: https://helm.cilium.io diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 new file mode 100644 index 00000000..bf97567c --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: coredns + namespace: flux-system +spec: + interval: 1h + url: https://coredns.github.io/helm diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 new file mode 100644 index 00000000..725cf4dd --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_cloudflare.enabled %# +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: external-dns + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/external-dns +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 new file mode 100644 index 00000000..da3f6023 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_cloudflare.enabled %# +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes.github.io/ingress-nginx +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 new file mode 100644 index 00000000..654e0e58 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 1h + url: https://charts.jetstack.io diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 new file mode 100644 index 00000000..b043b89a --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_cloudflare.enabled %# +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: k8s-gateway + namespace: flux-system +spec: + interval: 1h + url: https://ori-edge.github.io/k8s_gateway +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 new file mode 100644 index 00000000..f4b825b5 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./bjw-s.yaml + - ./cilium.yaml + - ./coredns.yaml + #% if bootstrap_cloudflare.enabled %# + - ./external-dns.yaml + - ./ingress-nginx.yaml + - ./k8s-gateway.yaml + #% endif %# + - ./jetstack.yaml + - ./metrics-server.yaml + - ./postfinance.yaml + - ./prometheus-community.yaml + - ./spegel.yaml + - ./stakater.yaml diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 new file mode 100644 index 00000000..1d93ab19 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: metrics-server + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/metrics-server diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 new file mode 100644 index 00000000..cd629ceb --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_distribution in ["talos"] %# +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 1h + url: https://postfinance.github.io/kubelet-csr-approver +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 new file mode 100644 index 00000000..8a127039 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 @@ -0,0 +1,11 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: prometheus-community + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/prometheus-community/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 new file mode 100644 index 00000000..6ccbe8f6 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: spegel + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/spegel-org/helm-charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 new file mode 100644 index 00000000..017f4ca7 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + interval: 1h + url: https://stakater.github.io/stakater-charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 new file mode 100644 index 00000000..ae7e0ad4 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./git + - ./helm + - ./oci diff --git a/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 new file mode 100644 index 00000000..8fb7c142 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 new file mode 100644 index 00000000..bdcb9f57 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + #% if bootstrap_cloudflare.enabled %# + SECRET_DOMAIN: "#{ bootstrap_cloudflare.domain }#" + SECRET_ACME_EMAIL: "#{ bootstrap_cloudflare.acme.email }#" + SECRET_CLOUDFLARE_TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 new file mode 100644 index 00000000..f176c7f5 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 
cluster-settings
+  namespace: flux-system
+data:
+  TIMEZONE: "#{ bootstrap_timezone }#"
+  CLUSTER_CIDR: "#{ bootstrap_pod_network.split(',')[0] }#"
+  NODE_CIDR: "#{ bootstrap_node_network }#"
+  #% if bootstrap_feature_gates.dual_stack_ipv4_first %#
+  CLUSTER_CIDR_V6: "#{ bootstrap_pod_network.split(',')[1] }#"
+  #% endif %#
+  #% if bootstrap_bgp.enabled %#
+  BGP_ADVERTISED_CIDR: "#{ bootstrap_bgp.advertised_network }#"
+  #% endif %#
diff --git a/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2
new file mode 100644
index 00000000..9ea91972
--- /dev/null
+++ b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2
@@ -0,0 +1,7 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./cluster-settings.yaml
+  - ./cluster-secrets.sops.yaml
diff --git a/config.sample.yaml b/config.sample.yaml
new file mode 100644
index 00000000..ec3b4c7d
--- /dev/null
+++ b/config.sample.yaml
@@ -0,0 +1,207 @@
+---
+
+#
+# 1. (Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations
+#
+
+# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York)
+bootstrap_timezone: ""
+
+# (Required) Distribution can either be k3s or talos
+bootstrap_distribution: k3s
+
+# (Required: Talos) Talos-specific options
+bootstrap_talos:
+  # (Required: Talos) If you need any additional System Extensions and/or kernel arguments, generate a schematic ID.
+  # Go to https://factory.talos.dev/ and choose the System Extensions and/or add kernel arguments.
+  schematic_id: ""
+  # (Optional: Talos) Add a VLAN tag to the network master device
+  # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans
+  vlan: ""
+  # (Optional: Talos) Secureboot and TPM-based disk encryption
+  secureboot:
+    # (Optional) Enable secureboot on UEFI systems. Not supported on x86 platforms in BIOS mode.
+    # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot
+    enabled: false
+    # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0
+    # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm
+    encrypt_disk_with_tpm: false
+  # (Optional) Add includes for user-provided patches to the generated talconfig.yaml.
+  # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml
+  # Patches are applied in this order (global overrides cp/worker, which overrides node-specific).
+  # Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok).
+  #   kubernetes/bootstrap/talos/patches/node_<node-name>.yaml  # Patches for individual nodes
+  #   kubernetes/bootstrap/talos/patches/controlPlane.yaml      # Patches for controlplane nodes
+  #   kubernetes/bootstrap/talos/patches/worker.yaml            # Patches for worker nodes
+  #   kubernetes/bootstrap/talos/patches/global.yaml            # Patches for ALL nodes
+  user_patches: false
+
+# (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24)
+bootstrap_node_network: ""
+
+# (Optional) The default gateway for the nodes
+# Default is .1 derived from bootstrap_node_network: 'x.x.x.1'
+bootstrap_node_default_gateway: ""
+
+# (Required) Use 1, 3 or a larger odd number of controller nodes; 3 is recommended
+# Worker nodes are optional
+bootstrap_node_inventory: []
+  # - name: ""         # Name of the node (must match [a-z0-9-\.]+)
+  #   address: ""      # IP address of the node
+  #   controller: true # (Required) Set to true if this is a controller node
+  #   ssh_user: ""     # (Required: k3s) SSH username of the node
+  #   disk: ""         # (Required: Talos) Device path or serial number of the disk for this node
+  #   mac_addr: ""     # (Required: Talos) MAC address of the network interface used by this node
+  #   ...
+
+# (Optional) The DNS server to use for the cluster; this can be an existing
+# local DNS server or a public one.
+# Default is ["1.1.1.1", "1.0.0.1"]
+# If using a local DNS server, make sure it meets the following requirements:
+#   1. your nodes can reach it
+#   2. it is configured to forward requests to a public DNS server
+#   3. you are not force-redirecting DNS requests to it - this will break cert generation over DNS01
+# If using multiple DNS servers, make sure they are set up the same way; there is no
+# guarantee that the first DNS server will always be used for every lookup.
+bootstrap_dns_servers: []
+
+# (Optional) The DNS search domain to use for the nodes.
+# Default is "."
+# Use the default or leave empty to avoid possible DNS issues inside the cluster.
+bootstrap_search_domain: ""
+
+# (Required) The pod CIDR for the cluster; this must NOT overlap with any
+# existing networks and is usually a /16 (64K IPs).
+# If you want to use IPv6 check the advanced flags below
+bootstrap_pod_network: "10.69.0.0/16"
+
+# (Required) The service CIDR for the cluster; this must NOT overlap with any
+# existing networks and is usually a /16 (64K IPs).
+# If you want to use IPv6 check the advanced flags below
+bootstrap_service_network: "10.96.0.0/16"
+
+# (Required) The IP address of the Kube API; choose an available IP in
+# your nodes' host network that is NOT being used. This is announced over L2.
+# kube-vip is used for k3s, while Talos uses its built-in VIP functionality
+bootstrap_controller_vip: ""
+
+# (Optional) Add additional SANs to the Kube API cert; this is useful
+# if you want to call the Kube API by hostname rather than IP
+bootstrap_tls_sans: []
+
+# (Required) Age Public Key (e.g. age1...)
+# 1. Generate a new key with the following command:
+#    > task sops:age-keygen
+# 2. Copy the public key and paste it below
+bootstrap_sops_age_pubkey: ""
+
+# (Optional) Use the Cilium BGP control plane when L2 announcements won't traverse VLAN network segments.
+# Requires a BGP-capable router set up with the node IPs as peers.
+# See: https://docs.cilium.io/en/latest/network/bgp-control-plane/
+bootstrap_bgp:
+  enabled: false
+  # (Optional) If using multiple BGP peers add them here.
+  # Default is .1 derived from host_network: ['x.x.x.1']
+  peers: []
+  # (Required) Set the BGP Autonomous System Number for the router(s) and nodes.
+  # If these match, iBGP will be used. If not, eBGP will be used.
+  peer_asn: ""  # Router(s) AS
+  local_asn: "" # Node(s) AS
+  # (Required) The advertised CIDR for the cluster; this must NOT overlap with any
+  # existing networks and is usually a /16 (64K IPs).
+  # If you want to use IPv6 check the advanced flags below
+  advertised_network: ""
+
+#
+# 2. (Required) Flux details - Flux is used to manage the cluster configuration.
+#
+
+# (Required) GitHub repository URL (for private repos use the ssh:// URL)
+bootstrap_github_address: ""
+
+# (Required) GitHub repository branch
+bootstrap_github_branch: "main"
+
+# (Required) Token for GitHub push-based sync
+# 1. Generate a new token with the following command:
+#    > openssl rand -hex 16
+# 2. Copy the token and paste it below
+bootstrap_github_webhook_token: ""
+
+# (Optional) Private key for Flux to access the GitHub repository
+# 1. Generate a new key with the following command:
+#    > ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f github-deploy.key -q -P ""
+# 2. Make sure to paste the public key from "github-deploy.key.pub" into
+#    the deploy keys section of your repository settings.
+# 3. Uncomment and paste the private key below
+# 4. Optionally set your repository on GitHub to private
+# bootstrap_github_private_key: |
+#   -----BEGIN OPENSSH PRIVATE KEY-----
+#   ...
+#   -----END OPENSSH PRIVATE KEY-----
+
+#
+# 3. (Optional) Cloudflare details - Cloudflare is used for DNS, TLS certificates and tunneling.
+#
+
+bootstrap_cloudflare:
+  # (Required) Disable to use a different DNS provider
+  enabled: false
+  # (Required) Cloudflare Domain
+  domain: ""
+  # (Required) Cloudflare API Token (NOT API Key)
+  # 1. Head over to Cloudflare and create an API Token by going to
+  #    https://dash.cloudflare.com/profile/api-tokens
+  # 2. Under the `API Tokens` section click the blue `Create Token` button.
+  # 3. Click the blue `Use template` button for the `Edit zone DNS` template.
+  # 4. Name your token something like `home-kubernetes`
+  # 5. Under `Permissions`, click `+ Add More` and add each permission below:
+  #      `Zone - DNS - Edit`
+  #      `Account - Cloudflare Tunnel - Read`
+  # 6. Limit the permissions to a specific account and zone resources.
+  # 7. Click the blue `Continue to Summary` button and then the blue `Create Token` button.
+  # 8. Copy the token and paste it below.
+  token: ""
+  # (Required) Options for Cloudflare ACME
+  acme:
+    # (Required) Any email you want to be associated with the ACME account (used for TLS certs via letsencrypt.org)
+    email: ""
+    # (Required) Use the ACME production server when requesting the wildcard certificate.
+    # By default the ACME staging server is used. This is to prevent being rate-limited.
+    # Update this option to `true` when you have verified the staging certificate
+    # works and then re-run `task configure` and push your changes to GitHub.
+    production: false
+  # (Required) Provide LAN access to the cluster ingresses for internal ingress classes
+  # The load balancer IP for internal ingress; choose an available IP
+  # in your nodes' host network that is NOT being used. This is announced over L2.
+  ingress_vip: ""
+  # (Required) Gateway is used for providing DNS to your cluster on your LAN
+  # The load balancer IP for k8s_gateway; choose an available IP
+  # in your nodes' host network that is NOT being used. This is announced over L2.
+  gateway_vip: ""
+  # (Required) Options for Cloudflare Tunnel
+  # 1. Authenticate cloudflared to your domain
+  #    > cloudflared tunnel login
+  # 2. Create the tunnel
+  #    > cloudflared tunnel create k8s
+  # 3. Copy the AccountTag, TunnelID, and TunnelSecret from the tunnel configuration file and paste them below
+  tunnel:
+    # (Required) Cloudflare Account ID (cat ~/.cloudflared/*.json | jq -r .AccountTag)
+    account_id: ""
+    # (Required) Cloudflared Tunnel ID (cat ~/.cloudflared/*.json | jq -r .TunnelID)
+    id: ""
+    # (Required) Cloudflared Tunnel Secret (cat ~/.cloudflared/*.json | jq -r .TunnelSecret)
+    secret: ""
+    # (Required) Provide WAN access to the cluster ingresses for external ingress classes
+    # The load balancer IP for external ingress; choose an available IP
+    # in your nodes' host network that is NOT being used. This is announced over L2.
+    ingress_vip: ""
+
+# (Optional) Feature gates are used to enable experimental features
+# bootstrap_feature_gates:
+#   # Enable Dual Stack IPv4 first
+#   # IMPORTANT: I am looking for people to help maintain IPv6 support since I cannot test it.
+#   # Ref: https://github.com/onedr0p/cluster-template/issues/1148
+#   # Keep in mind that Cilium does not currently support IPv6 L2 announcements.
+#   # Make sure you set bootstrap_pod_network and bootstrap_service_network
+#   # to valid dual-stack CIDRs, e.g. "10.42.0.0/16,fd00:10:244::/64"
+#   dual_stack_ipv4_first: false
diff --git a/docs/assets/logo.png b/docs/assets/logo.png
new file mode 100644
index 00000000..05162e5d
Binary files /dev/null and b/docs/assets/logo.png differ
diff --git a/docs/assets/rack.jpg b/docs/assets/rack.jpg
new file mode 100644
index 00000000..88565c84
Binary files /dev/null and b/docs/assets/rack.jpg differ
diff --git a/docs/set-up.md b/docs/set-up.md
new file mode 100644
index 00000000..202fec00
--- /dev/null
+++ b/docs/set-up.md
@@ -0,0 +1,46 @@
+# Set-up guide
+
+## Install and configure Proxmox
+
+1. Download the official image from the Proxmox [site](https://www.proxmox.com/en/downloads/proxmox-virtual-environment/iso).
+2. Flash the image and install it on the machines. During installation, specify and write down the static IP address that
+will be used by the machine.
+3. Disable subscription repositories. Go to the Repositories settings menu and disable all components marked as
+`enterprise` and `pve-enterprise`.
+4. SSH to the node and run `apt-get update` followed by `apt-get upgrade`.
+5. Go to Network, select the Linux Bridge and check the `VLAN aware` checkbox so that virtual machines can be assigned to
+different VLANs.
+6. Set up a simple Proxmox cluster using the menu wizard. There is no need to make it HA since Kubernetes will handle the HA.
+
+### Set up GPU passthrough
+1. Edit `/etc/default/grub` with the following changes:
+    ```
+    GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on"
+    ```
+2. Run `update-grub` and reboot the node.
+3. Verify that IOMMU is enabled:
+    ```
+    dmesg | grep -e DMAR -e IOMMU
+    ```
+    There should be a line that looks like `DMAR: IOMMU enabled`.
+4. For any troubleshooting check out [this guide](https://3os.org/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#proxmox-configuration-for-igpu-full-passthrough).
+
+## Create and install Talos images
+1. Head over to https://factory.talos.dev and follow the instructions, which will eventually lead you to download a Talos
+Linux ISO file. Make sure to note the schematic ID; you will need it later on. Add the following extensions:
+    - siderolabs/iscsi-tools -- for Longhorn
+    - siderolabs/util-linux-tools -- for Longhorn
+    - siderolabs/qemu-guest-agent -- for managing the VM from the Proxmox UI
+2. 
+2. Create a VM with the following configuration:
+   - Startup on boot
+   - BIOS: SeaBIOS
+   - Machine: q35
+   - Memory: balloon disabled
+   - CPU: type host, CPU units 1024
+   - Network: VLAN 20, firewall disabled, MAC address one of the following: BC:24:11:B5:DD:1F, BC:24:11:0C:FD:22, BC:24:11:A8:19:33
+3. Add the PCI device `Intel HD Graphics`
+
+## Bootstrap the Kubernetes cluster
+```
+task talos:bootstrap
+```
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 00000000..bb05c0bf
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,133 @@
+{
+  "nodes": {
+    "flake-parts": {
+      "inputs": {
+        "nixpkgs-lib": "nixpkgs-lib"
+      },
+      "locked": {
+        "lastModified": 1712014858,
+        "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "type": "github"
+      }
+    },
+    "flake-utils": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1710146030,
+        "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1716358718,
+        "narHash": "sha256-NQbegJb2ZZnAqp2EJhWwTf6DrZXSpA6xZCEq+RGV1r0=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "3f316d2a50699a78afe5e77ca486ad553169061e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixpkgs-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs-lib": {
+      "locked": {
+        "dir": "lib",
+        "lastModified": 1711703276,
+        "narHash": "sha256-iMUFArF0WCatKK6RzfUJknjem0H9m4KgorO/p3Dopkk=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "d8fe5e6c92d0d190646fb9f1056741a229980089",
+        "type": "github"
+      },
+      "original": {
+        "dir": "lib",
+        "owner": "NixOS",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_2": {
+      "locked": {
+        "lastModified": 1713537308,
+        "narHash": "sha256-XtTSSIB2DA6tOv+l0FhvfDMiyCmhoRbNB+0SeInZkbk=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "5c24cf2f0a12ad855f444c30b2421d044120c66f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "flake-utils": "flake-utils",
+        "nixpkgs": "nixpkgs",
+        "talhelper": "talhelper"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
+    "talhelper": {
+      "inputs": {
+        "flake-parts": "flake-parts",
+        "nixpkgs": "nixpkgs_2"
+      },
+      "locked": {
+        "lastModified": 1716432544,
+        "narHash": "sha256-GC8DjBa2yhiJF2LBrnECZ/JExR+5bXF7HUYSDlEjkG0=",
+        "owner": "budimanjojo",
+        "repo": "talhelper",
+        "rev": "176906fc96fbbd671b92575b4245bfbbccd9ebde",
+        "type": "github"
+      },
+      "original": {
+        "owner": "budimanjojo",
+        "repo": "talhelper",
+        "type": "github"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 00000000..3aff929d
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,32 @@
+{
+  description = "A basic flake with a shell";
+
inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + talhelper.url = "github:budimanjojo/talhelper"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = { nixpkgs, talhelper, flake-utils, ... }: + flake-utils.lib.eachDefaultSystem (system: + let + pkgs = nixpkgs.legacyPackages.${system}; + in + { + devShells.default = pkgs.mkShell { + packages = [ + pkgs.age + pkgs.cloudflared + pkgs.helmfile + pkgs.jq + pkgs.kubeconform + pkgs.kustomize + pkgs.moreutils + pkgs.sops + pkgs.stern + pkgs.yq-go + pkgs.envsubst + talhelper.packages.${system}.default + ]; + }; + }); +} diff --git a/kubernetes/apps/actions-runner-system/alert.yaml b/kubernetes/apps/actions-runner-system/alert.yaml new file mode 100644 index 00000000..019ef83b --- /dev/null +++ b/kubernetes/apps/actions-runner-system/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml new file mode 100644 index 00000000..875e9f14 --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: actions-runner-controller-auth +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: actions-runner-controller-auth-secret + template: + engineVersion: v2 + data: + ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY }} + dataFrom: + - extract: + key: actions-runner-controller diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml new file mode 100644 index 00000000..fc71a43a --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set-controller +spec: + interval: 30m + chart: 
+ spec: + chart: gha-runner-scale-set-controller + version: 0.9.1 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: gha-runner-scale-set-controller diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml new file mode 100644 index 00000000..4eed917b --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml new file mode 100644 index 00000000..823539ea --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set-controller + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/actions-runner-system/gha-runner-scale-set-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml new file mode 100644 index 00000000..a0498efc --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml @@ -0,0 +1,49 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set +spec: + interval: 30m + chart: + spec: + chart: gha-runner-scale-set + version: 0.9.1 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: gha-runner-scale-set-controller + namespace: actions-runner-system + valuesFrom: + - targetPath: githubConfigSecret.github_app_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID + - targetPath: githubConfigSecret.github_app_installation_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID + - targetPath: githubConfigSecret.github_app_private_key + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY + values: + nameOverride: gha-runner-scale-set + runnerScaleSetName: gha-runner-scale-set + githubConfigUrl: 
https://github.com/MaksimShakavin/flux-homelab + minRunners: 1 + maxRunners: 6 + controllerServiceAccount: + name: gha-runner-scale-set-controller + namespace: actions-runner-system diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/actions-runner-system/gha-runner-scale-set/ks.yaml b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/ks.yaml new file mode 100644 index 00000000..93848568 --- /dev/null +++ b/kubernetes/apps/actions-runner-system/gha-runner-scale-set/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/actions-runner-system/gha-runner-scale-set/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/actions-runner-system/kustomization.yaml b/kubernetes/apps/actions-runner-system/kustomization.yaml new file mode 100644 index 00000000..f9498723 --- /dev/null +++ b/kubernetes/apps/actions-runner-system/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./gha-runner-scale-set-controller/ks.yaml + - ./gha-runner-scale-set/ks.yaml diff --git a/kubernetes/apps/actions-runner-system/namespace.yaml b/kubernetes/apps/actions-runner-system/namespace.yaml new file mode 100644 index 00000000..d6512dfb --- /dev/null +++ b/kubernetes/apps/actions-runner-system/namespace.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: actions-runner-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" diff --git a/kubernetes/apps/cert-manager/alert.yaml b/kubernetes/apps/cert-manager/alert.yaml new file mode 100644 index 00000000..e1f2257d --- /dev/null +++ b/kubernetes/apps/cert-manager/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: cert-manager +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: cert-manager +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - 
"error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml new file mode 100644 index 00000000..61749e65 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.14.5 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + installCRDs: true + dns01RecursiveNameservers: 1.1.1.1:53,9.9.9.9:53 + dns01RecursiveNameserversOnly: true + podDnsPolicy: None + podDnsConfig: + nameservers: + - "1.1.1.1" + - "9.9.9.9" + prometheus: + enabled: true + servicemonitor: + enabled: true diff --git a/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml new file mode 100644 index 00000000..5e098843 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml b/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml new file mode 100644 index 00000000..ae08bb14 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cert-manager-rules +spec: + groups: + - name: cert-manager + rules: + - alert: CertManagerAbsent + expr: | + absent(up{job="cert-manager"}) + for: 15m + labels: + severity: critical + annotations: + description: + "New certificates will not be able to be minted, and existing + ones can't be renewed until cert-manager is back." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent + summary: "Cert Manager has dissapeared from Prometheus service discovery." + - name: certificates + rules: + - alert: CertManagerCertExpirySoon + expr: | + avg by (exported_namespace, namespace, name) ( + certmanager_certificate_expiration_timestamp_seconds - time()) + < (21 * 24 * 3600) + for: 15m + labels: + severity: warning + annotations: + description: + "The domain that this cert covers will be unavailable after + {{ $value | humanizeDuration }}. Clients using endpoints that this cert + protects will start to fail in {{ $value | humanizeDuration }}." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon + summary: + "The cert {{ $labels.name }} is {{ $value | humanizeDuration }} + from expiry, it should have renewed over a week ago." 
+ - alert: CertManagerCertNotReady + expr: | + max by (name, exported_namespace, namespace, condition) ( + certmanager_certificate_ready_status{condition!="True"} == 1) + for: 15m + labels: + severity: critical + annotations: + description: + "This certificate has not been ready to serve traffic for at least + 15m. If the cert is being renewed or there is another valid cert, the ingress + controller _may_ be able to serve that instead." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready + summary: "The cert {{ $labels.name }} is not ready to serve traffic." + - alert: CertManagerHittingRateLimits + expr: | + sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) + > 0 + for: 15m + labels: + severity: critical + annotations: + description: + "Depending on the rate limit, cert-manager may be unable to generate + certificates for up to a week." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits + summary: "Cert manager hitting LetsEncrypt rate limits." diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml new file mode 100644 index 00000000..1cf7148a --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml @@ -0,0 +1,39 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-production + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + dnsZones: + - "${SECRET_DOMAIN}" +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + dnsZones: + - "${SECRET_DOMAIN}" diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml new file mode 100644 index 00000000..fd43d965 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./issuers.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml new file mode 100644 index 00000000..eb1a98c9 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-manager-secret +stringData: + api-token: ENC[AES256_GCM,data:V/OeW+bpuNGXDAiNZ2WmawliZ8JakYzZvSqNhuLRCif3e1nXDXXL+Q==,iv:yq3rE8ZsK2ih6FMNtFRvak7xNNTTB/VCz0+Mp8CiJ5M=,tag:2eY19fzMjg99TAlbC44ntw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnSm52UU95ZVJMUE52cjc3 + 
THplYTFpbFd4ZDJSV2RIaDNLWVRxZFd6TEVzCi9OSmIvYUhvVUhHQldoalJzMEpX + dXNPV3AreUowSHBBY1NYUlh5b24wZDAKLS0tIHVwVkViazFmVmhqQjBqNkJiVlN4 + VGFML3ZMZzk4WlF3NjJ6SXpobzJPMlEKheLxsJRKPxsPwGOKZ8kb5viGJ07RT9eq + id87ugUEST/+c5l0YE4Q5DDRpikoiT3uoDS7X+PfIGHgQWiQUq4uNQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-02-17T21:45:25Z" + mac: ENC[AES256_GCM,data:bKwwkxj+C5/dPsKsiFi599+d31RpAbcQQ5HugHBNIANGT0nwmYx9Cj8gDGcAeY4OBs9fWzZ2uHVW9ZbgrzyOdsSH2VdurPvOruJZ2kuWZ1BYZm1pbsFXRWhuWxaaJLTK9mP4YlOEQ76uYVMaaXORS7Pt4AHmliDReOyGJF4X+lI=,iv:SVsKQ7xOkLcODOWe7A/IFoQpIMB4Cbb7p8P4st3lZjo=,tag:Wq6M1In3czCDwUXsFBHMGQ==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/apps/cert-manager/cert-manager/ks.yaml new file mode 100644 index 00000000..6d180255 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager-issuers + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager + path: ./kubernetes/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/cert-manager/kustomization.yaml b/kubernetes/apps/cert-manager/kustomization.yaml new file mode 100644 index 00000000..39593fb7 --- /dev/null +++ b/kubernetes/apps/cert-manager/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./cert-manager/ks.yaml diff --git a/kubernetes/apps/cert-manager/namespace.yaml b/kubernetes/apps/cert-manager/namespace.yaml new file mode 100644 index 00000000..ed788350 --- /dev/null +++ b/kubernetes/apps/cert-manager/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/database/alert.yaml b/kubernetes/apps/database/alert.yaml new file mode 100644 index 00000000..f14a6377 --- /dev/null +++ b/kubernetes/apps/database/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: database +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: database +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/database/cloudnative-pg/app/externalsecret.yaml b/kubernetes/apps/database/cloudnative-pg/app/externalsecret.yaml new file mode 100644 index 00000000..4aac2e27 --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/app/externalsecret.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudnative-pg +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: cloudnative-pg-secret + template: + engineVersion: v2 + metadata: + labels: + cnpg.io/reload: "true" + data: + - secretKey: username + remoteRef: + key: cloudnative-pg + property: POSTGRES_SUPER_USER + - secretKey: password + remoteRef: + key: cloudnative-pg + property: POSTGRES_SUPER_PASS + - secretKey: aws-access-key-id + remoteRef: + key: minio + property: MINIO_ROOT_USER + - secretKey: aws-secret-access-key + remoteRef: + key: minio + property: MINIO_ROOT_PASSWORD diff --git a/kubernetes/apps/database/cloudnative-pg/app/helmrelease.yaml b/kubernetes/apps/database/cloudnative-pg/app/helmrelease.yaml new file mode 100644 index 00000000..57730973 --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/app/helmrelease.yaml @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudnative-pg +spec: + interval: 30m + chart: + spec: + chart: cloudnative-pg + version: 0.21.2 + sourceRef: + kind: HelmRepository + name: cloudnative-pg + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + dependsOn: + - name: local-path-provisioner + namespace: storage + values: + crds: + create: true + config: + data: + INHERITED_ANNOTATIONS: kyverno.io/ignore + monitoring: + podMonitorEnabled: false + grafanaDashboard: + create: true diff --git a/kubernetes/apps/database/cloudnative-pg/app/kustomization.yaml b/kubernetes/apps/database/cloudnative-pg/app/kustomization.yaml new file mode 100644 index 00000000..c59808bf --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/app/kustomization.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./prometheusrule.yaml +configMapGenerator: + - name: cloudnative-pg-dashboard + files: + - cloudnative-pg-dashboard.json=https://raw.githubusercontent.com/cloudnative-pg/grafana-dashboards/main/charts/cluster/grafana-dashboard.json +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled + labels: + grafana_dashboard: "true" diff --git a/kubernetes/apps/database/cloudnative-pg/app/prometheusrule.yaml 
b/kubernetes/apps/database/cloudnative-pg/app/prometheusrule.yaml new file mode 100644 index 00000000..9c1d6a8d --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/app/prometheusrule.yaml @@ -0,0 +1,67 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cloudnative-pg-rules + labels: + prometheus: k8s + role: alert-rules +spec: + groups: + - name: cloudnative-pg.rules + rules: + - alert: LongRunningTransaction + annotations: + description: Pod {{ $labels.pod }} is taking more than 5 minutes (300 seconds) for a query. + summary: A query is taking longer than 5 minutes. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + - alert: BackendsWaiting + annotations: + description: Pod {{ $labels.pod }} has been waiting for longer than 5 minutes + summary: If a backend is waiting for longer than 5 minutes + expr: |- + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + - alert: PGDatabase + annotations: + description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + summary: Number of transactions from the frozen XID to the current one + expr: |- + cnpg_pg_database_xid_age > 150000000 + for: 1m + labels: + severity: warning + - alert: PGReplication + annotations: + description: Standby is lagging behind by over 300 seconds (5 minutes) + summary: The standby is lagging behind the primary + expr: |- + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + - alert: LastFailedArchiveTime + annotations: + description: Archiving failed for {{ $labels.pod }} + summary: Checks the last time archiving failed. Will be < 0 when it has not failed. 
+          expr: |-
+            (cnpg_pg_stat_archiver_last_failed_time - cnpg_pg_stat_archiver_last_archived_time) > 1
+          for: 1m
+          labels:
+            severity: warning
+        - alert: DatabaseDeadlockConflicts
+          annotations:
+            description: There are over 10 deadlock conflicts in {{ $labels.pod }}
+            summary: Checks the number of database conflicts
+          expr: |-
+            cnpg_pg_stat_database_deadlocks > 10
+          for: 1m
+          labels:
+            severity: warning
diff --git a/kubernetes/apps/database/cloudnative-pg/cluster/cluster.yaml b/kubernetes/apps/database/cloudnative-pg/cluster/cluster.yaml
new file mode 100644
index 00000000..67600607
--- /dev/null
+++ b/kubernetes/apps/database/cloudnative-pg/cluster/cluster.yaml
@@ -0,0 +1,54 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/cluster_v1.json
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: postgres-cluster
+spec:
+  instances: 3
+  storage:
+    size: 9Gi
+    storageClass: local-hostpath
+  superuserSecret:
+    name: cloudnative-pg-secret
+  enableSuperuserAccess: true
+  nodeMaintenanceWindow:
+    inProgress: false
+    reusePVC: true
+  monitoring:
+    enablePodMonitor: true
+    # Ref: https://github.com/cloudnative-pg/cloudnative-pg/issues/2501
+    podMonitorMetricRelabelings:
+      - { sourceLabels: [ "cluster" ], targetLabel: cnpg_cluster, action: replace }
+      - { regex: cluster, action: labeldrop }
+  backup:
+    retentionPolicy: 30d
+    barmanObjectStore: &barmanObjectStore
+      data:
+        compression: bzip2
+      wal:
+        compression: bzip2
+        maxParallel: 8
+      destinationPath: s3://cloudnative-pg/
+      endpointURL: http://${NAS_URL}:9000
+      # Note: the serverName version needs to be incremented
+      #   when recovering from an existing cnpg cluster
+      serverName: &currentCluster postgres-v2
+      s3Credentials:
+        accessKeyId:
+          name: cloudnative-pg-secret
+          key: aws-access-key-id
+        secretAccessKey:
+          name: cloudnative-pg-secret
+          key: aws-secret-access-key
+  # Note: previousCluster needs to be set to the name of the previous
+  #   cluster when recovering from an existing cnpg cluster
+  bootstrap:
+    recovery:
+      source: &previousCluster postgres-v1
+  # Note: externalClusters is needed when recovering from an existing cnpg cluster
+  externalClusters:
+    - name: *previousCluster
+      barmanObjectStore:
+        <<: *barmanObjectStore
+        serverName: *previousCluster
diff --git a/kubernetes/apps/database/cloudnative-pg/cluster/gatus.yaml b/kubernetes/apps/database/cloudnative-pg/cluster/gatus.yaml
new file mode 100644
index 00000000..f000099e
--- /dev/null
+++ b/kubernetes/apps/database/cloudnative-pg/cluster/gatus.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-gatus-ep
+  labels:
+    gatus.io/enabled: "true"
+data:
+  config.yaml: |
+    endpoints:
+      - name: postgres
+        group: infrastructure
+        url: tcp://postgres-cluster-rw.database.svc.cluster.local:5432
+        interval: 1m
+        ui:
+          hide-url: true
+          hide-hostname: true
+        conditions:
+          - "[CONNECTED] == true"
+        alerts:
+          - type: discord
diff --git a/kubernetes/apps/database/cloudnative-pg/cluster/kustomization.yaml b/kubernetes/apps/database/cloudnative-pg/cluster/kustomization.yaml
new file mode 100644
index 00000000..f620a3c7
--- /dev/null
+++ b/kubernetes/apps/database/cloudnative-pg/cluster/kustomization.yaml
@@ -0,0 +1,9 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: default
+resources:
+  - ./cluster.yaml
+  - ./gatus.yaml
+  - ./scheduledbackup.yaml
diff --git 
a/kubernetes/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml b/kubernetes/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml new file mode 100644 index 00000000..e56347d8 --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/scheduledbackup_v1.json +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: postgres +spec: + schedule: "@daily" + immediate: true + backupOwnerReference: self + cluster: + name: postgres-cluster diff --git a/kubernetes/apps/database/cloudnative-pg/ks.yaml b/kubernetes/apps/database/cloudnative-pg/ks.yaml new file mode 100644 index 00000000..be90bfb1 --- /dev/null +++ b/kubernetes/apps/database/cloudnative-pg/ks.yaml @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudnative-pg + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/database/cloudnative-pg/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cloudnative-pg-cluster + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: cloudnative-pg + dependsOn: + - name: cloudnative-pg + path: ./kubernetes/apps/database/cloudnative-pg/cluster + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/database/dragonfly/app/helmrelease.yaml b/kubernetes/apps/database/dragonfly/app/helmrelease.yaml new file mode 100644 index 00000000..16d84483 --- /dev/null +++ b/kubernetes/apps/database/dragonfly/app/helmrelease.yaml @@ -0,0 +1,102 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app dragonfly-operator +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + dragonfly-operator: + strategy: RollingUpdate + containers: + app: + image: + repository: ghcr.io/dragonflydb/operator + tag: v1.1.2@sha256:f0d76725950095ac65b36252e0042d339d1db9b181b1d068f4b6686ea93055e4 + command: ["/manager"] + args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + probes: + liveness: + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: &port 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /readyz + port: *port + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + resources: + requests: + cpu: 10m + 
limits: + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + service: + app: + controller: *app + ports: + http: + port: *port + metrics: + port: 8080 + serviceMonitor: + app: + serviceName: *app + endpoints: + - port: metrics + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + serviceAccount: + create: true + name: *app diff --git a/kubernetes/apps/database/dragonfly/app/kustomization.yaml b/kubernetes/apps/database/dragonfly/app/kustomization.yaml new file mode 100644 index 00000000..639c55db --- /dev/null +++ b/kubernetes/apps/database/dragonfly/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # renovate: datasource=github-releases depName=dragonflydb/dragonfly-operator + - https://raw.githubusercontent.com/dragonflydb/dragonfly-operator/v1.1.2/manifests/crd.yaml + - ./helmrelease.yaml + - ./rbac.yaml diff --git a/kubernetes/apps/database/dragonfly/app/rbac.yaml b/kubernetes/apps/database/dragonfly/app/rbac.yaml new file mode 100644 index 00000000..6e1e0920 --- /dev/null +++ b/kubernetes/apps/database/dragonfly/app/rbac.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dragonfly-operator +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] + - apiGroups: ["apps"] + resources: ["statefulsets"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] + - apiGroups: ["dragonflydb.io"] + resources: ["dragonflies"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] + - apiGroups: ["dragonflydb.io"] + resources: ["dragonflies/finalizers"] + verbs: ["update"] + - apiGroups: ["dragonflydb.io"] + resources: ["dragonflies/status"] + verbs: ["get", "patch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dragonfly-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: dragonfly-operator +subjects: + - kind: ServiceAccount + name: dragonfly-operator + namespace: database diff --git a/kubernetes/apps/database/dragonfly/cluster/cluster.yaml b/kubernetes/apps/database/dragonfly/cluster/cluster.yaml new file mode 100644 index 00000000..97984b2b --- /dev/null +++ b/kubernetes/apps/database/dragonfly/cluster/cluster.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/dragonflydb.io/dragonfly_v1alpha1.json +apiVersion: dragonflydb.io/v1alpha1 +kind: Dragonfly +metadata: + name: dragonfly +spec: + image: ghcr.io/dragonflydb/dragonfly:v1.18.1 + replicas: 3 + env: + - name: MAX_MEMORY + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: 1Mi + args: + - --maxmemory=$(MAX_MEMORY)Mi + - --proactor_threads=2 + - 
--cluster_mode=emulated + - --lock_on_hashtags + resources: + requests: + cpu: 100m + limits: + memory: 512Mi diff --git a/kubernetes/apps/database/dragonfly/cluster/kustomization.yaml b/kubernetes/apps/database/dragonfly/cluster/kustomization.yaml new file mode 100644 index 00000000..6f0f305d --- /dev/null +++ b/kubernetes/apps/database/dragonfly/cluster/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cluster.yaml + - ./podmonitor.yaml diff --git a/kubernetes/apps/database/dragonfly/cluster/podmonitor.yaml b/kubernetes/apps/database/dragonfly/cluster/podmonitor.yaml new file mode 100644 index 00000000..b26a770d --- /dev/null +++ b/kubernetes/apps/database/dragonfly/cluster/podmonitor.yaml @@ -0,0 +1,13 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/podmonitor_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: dragonfly +spec: + selector: + matchLabels: + app: dragonfly + podTargetLabels: ["app"] + podMetricsEndpoints: + - port: admin diff --git a/kubernetes/apps/database/dragonfly/ks.yaml b/kubernetes/apps/database/dragonfly/ks.yaml new file mode 100644 index 00000000..90e97232 --- /dev/null +++ b/kubernetes/apps/database/dragonfly/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app dragonfly + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/database/dragonfly/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app dragonfly-cluster + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: dragonfly + path: ./kubernetes/apps/database/dragonfly/cluster + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/database/kustomization.yaml b/kubernetes/apps/database/kustomization.yaml new file mode 100644 index 00000000..2f2d257a --- /dev/null +++ b/kubernetes/apps/database/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./cloudnative-pg/ks.yaml + - ./dragonfly/ks.yaml diff --git a/kubernetes/apps/database/namespace.yaml b/kubernetes/apps/database/namespace.yaml new file mode 100644 index 00000000..5cad2860 --- /dev/null +++ b/kubernetes/apps/database/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: database + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/default/alert.yaml b/kubernetes/apps/default/alert.yaml new file mode 100644 index 00000000..bf897bae --- /dev/null +++ b/kubernetes/apps/default/alert.yaml @@ -0,0 +1,29 @@ +# 
yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json
+apiVersion: notification.toolkit.fluxcd.io/v1beta3
+kind: Provider
+metadata:
+  name: alert-manager
+  namespace: default
+spec:
+  type: alertmanager
+  address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json
+apiVersion: notification.toolkit.fluxcd.io/v1beta3
+kind: Alert
+metadata:
+  name: alert-manager
+  namespace: default
+spec:
+  providerRef:
+    name: alert-manager
+  eventSeverity: error
+  eventSources:
+    - kind: HelmRelease
+      name: "*"
+  exclusionList:
+    - "error.*lookup github\\.com"
+    - "error.*lookup raw\\.githubusercontent\\.com"
+    - "dial.*tcp.*timeout"
+    - "waiting.*socket"
+  suspend: false
diff --git a/kubernetes/apps/default/homepage/app/configmap.yaml b/kubernetes/apps/default/homepage/app/configmap.yaml
new file mode 100644
index 00000000..71d59b32
--- /dev/null
+++ b/kubernetes/apps/default/homepage/app/configmap.yaml
@@ -0,0 +1,81 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: homepage-config
+  labels:
+    app.kubernetes.io/name: homepage
+data:
+  bookmarks.yaml: |
+    - Communicate:
+        - Discord:
+            - icon: discord.png
+              href: https://discord.com/app
+    - Media:
+        - YouTube:
+            - icon: youtube.png
+              href: https://youtube.com/feed/subscriptions
+    - Reading:
+        - Reddit:
+            - icon: reddit.png
+              href: https://reddit.com
+    - Git:
+        - kubesearch:
+            - icon: kubernetes-dashboard.png
+              href: https://kubesearch.dev
+        - flux-cluster-template:
+            - icon: github.png
+              href: https://github.com/onedr0p/flux-cluster-template
+  docker.yaml: ""
+  kubernetes.yaml: |
+    mode: cluster
+  services.yaml: |
+    - Network:
+        - Cloudflared:
+            href: https://dash.cloudflare.com
+            icon: cloudflare-zero-trust.png
+            description: Cloudflared Tunnel
+            widget:
+              type: cloudflared
+              accountid: "{{HOMEPAGE_VAR_CLOUDFLARED_ACCOUNTID}}"
+              tunnelid: "{{HOMEPAGE_VAR_CLOUDFLARED_TUNNELID}}"
+              key: "{{HOMEPAGE_VAR_CLOUDFLARED_API_TOKEN}}"
+  settings.yaml: |
+    title: Pohulanka home
+    theme: dark
+    color: slate
+    headerStyle: clean
+    layout:
+      Home:
+        style: column
+        icon: mdi-home-analytics
+      Network:
+        style: row
+        columns: 3
+        icon: mdi-server
+      Observability:
+        style: column
+        icon: mdi-chart-line
+    providers:
+      longhorn:
+        url: http://longhorn-frontend.storage.svc.cluster.local
+  widgets.yaml: |
+    - resources:
+        backend: kubernetes
+        cpu: true
+        expanded: true
+        memory: true
+    - greeting:
+        text_size: xl
+        text: "Welcome!"
+ - datetime: + text_size: l + format: + dateStyle: long + timeStyle: short + hourCycle: h23 + - longhorn: + expanded: true + total: true + labels: true + nodes: true diff --git a/kubernetes/apps/default/homepage/app/externalsecret.yaml b/kubernetes/apps/default/homepage/app/externalsecret.yaml new file mode 100644 index 00000000..524e8b1c --- /dev/null +++ b/kubernetes/apps/default/homepage/app/externalsecret.yaml @@ -0,0 +1,69 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: homepage +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: homepage-secret + template: + engineVersion: v2 + data: + ## Non Cluster resources + HOMEPAGE_VAR_CLOUDFLARED_ACCOUNTID: "{{ .CLOUDFLARE_ACCOUNT_TAG }}" + HOMEPAGE_VAR_CLOUDFLARED_TUNNELID: "{{ .CLUSTER_CLOUDFLARE_TUNNEL_ID }}" + HOMEPAGE_VAR_CLOUDFLARED_API_TOKEN: "{{ .CLOUDFLARE_HOMEPAGE_TUNNEL_SECRET }}" + HOMEPAGE_VAR_SYNOLOGY_USERNAME: "{{ .HOMEPAGE_SYNOLOGY_USERNAME }}" + HOMEPAGE_VAR_SYNOLOGY_PASSWORD: "{{ .HOMEPAGE_SYNOLOGY_PASSWORD }}" + HOMEPAGE_VAR_PROXMOX_USERNAME: "{{ .HOMEPAGE_PROXMOX_USERNAME }}" + HOMEPAGE_VAR_PROXMOX_PASSWORD: "{{ .HOMEPAGE_PROXMOX_PASSWORD }}" + HOMEPAGE_VAR_PI_HOLE_TOKEN: "{{ .HOMEPAGE_PI_HOLE_TOKEN }}" + ## Default + HOMEPAGE_VAR_QBITTORRENT_USERNAME: "{{ .homepage_qbittorrent_username }}" + HOMEPAGE_VAR_QBITTORRENT_PASSWORD: "{{ .homepage_qbittorrent_password }}" + HOMEPAGE_VAR_RADARR_TOKEN: "{{ .RADARR_API_KEY }}" + HOMEPAGE_VAR_SONARR_TOKEN: "{{ .SONARR_API_KEY }}" + HOMEPAGE_VAR_PROWLARR_TOKEN: "{{ .PROWLARR_API_KEY }}" + HOMEPAGE_VAR_PLEX_TOKEN: "{{ .PLEX_TOKEN }}" + HOMEPAGE_VAR_OVERSEERR_TOKEN: "{{ .OVERSEERR_TOKEN }}" + ## Observability + HOMEPAGE_VAR_PORTAINER_TOKEN: "{{ .HOMEPAGE_PORTAINER_TOKEN }}" + HOMEPAGE_VAR_GRAFANA_USER: "{{ .grafana_username }}" + HOMEPAGE_VAR_GRAFANA_PASSWORD: "{{ .grafana_password }}" + dataFrom: + - extract: + key: cloudflare + - extract: + key: synology + - extract: + key: proxmox + - extract: + key: pihole + - extract: + key: qbittorrent + rewrite: + - regexp: + source: "(.*)" + target: "homepage_qbittorrent_$1" + - extract: + key: radarr + - extract: + key: sonarr + - extract: + key: prowlarr + - extract: + key: plex + - extract: + key: overseerr + - extract: + key: portainer + - extract: + key: grafana + rewrite: + - regexp: + source: "(.*)" + target: "grafana_$1" diff --git a/kubernetes/apps/default/homepage/app/helmrelease.yaml b/kubernetes/apps/default/homepage/app/helmrelease.yaml new file mode 100644 index 00000000..587cd222 --- /dev/null +++ b/kubernetes/apps/default/homepage/app/helmrelease.yaml @@ -0,0 +1,77 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: homepage +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + interval: 30m + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + values: + controllers: + homepage: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/gethomepage/homepage + tag: v0.8.13 + env: + TZ: "${TIMEZONE}" + envFrom: + - secretRef: + name: homepage-secret 
+ service: + app: + controller: homepage + ports: + http: + port: 3000 + ingress: + app: + className: internal + hosts: + - host: home.${SECRET_DOMAIN} + paths: + - path: / + pathType: Prefix + service: + identifier: app + port: http + persistence: + config: + type: configMap + name: homepage-config + globalMounts: + - subPath: bookmarks.yaml + path: /app/config/bookmarks.yaml + - subPath: docker.yaml + path: /app/config/docker.yaml + - subPath: kubernetes.yaml + path: /app/config/kubernetes.yaml + - subPath: services.yaml + path: /app/config/services.yaml + - subPath: settings.yaml + path: /app/config/settings.yaml + - subPath: widgets.yaml + path: /app/config/widgets.yaml + serviceAccount: + create: true diff --git a/kubernetes/apps/default/homepage/app/kustomization.yaml b/kubernetes/apps/default/homepage/app/kustomization.yaml new file mode 100644 index 00000000..a7e30d64 --- /dev/null +++ b/kubernetes/apps/default/homepage/app/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./configmap.yaml + - ./helmrelease.yaml + - ./rbac.yaml + - ../../../../templates/gatus/internal diff --git a/kubernetes/apps/default/homepage/app/rbac.yaml b/kubernetes/apps/default/homepage/app/rbac.yaml new file mode 100644 index 00000000..c1df51e2 --- /dev/null +++ b/kubernetes/apps/default/homepage/app/rbac.yaml @@ -0,0 +1,63 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: &app homepage + labels: + app.kubernetes.io/instance: *app + app.kubernetes.io/name: *app +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - nodes + verbs: + - get + - list + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - apiGroups: + - traefik.containo.us + resources: + - ingressroutes + verbs: + - get + - list + - apiGroups: + - metrics.k8s.io + resources: + - nodes + - pods + verbs: + - get + - list + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: &app homepage + labels: + app.kubernetes.io/instance: *app + app.kubernetes.io/name: *app +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: homepage +subjects: + - kind: ServiceAccount + name: *app + namespace: default # keep diff --git a/kubernetes/apps/default/homepage/ks.yaml b/kubernetes/apps/default/homepage/ks.yaml new file mode 100644 index 00000000..8536917c --- /dev/null +++ b/kubernetes/apps/default/homepage/ks.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app homepage + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/default/homepage/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/apps/default/kustomization.yaml b/kubernetes/apps/default/kustomization.yaml new file mode 100644 index 00000000..336f50e4 --- /dev/null +++ 
b/kubernetes/apps/default/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./qbittorrent/ks.yaml + - ./radarr/ks.yaml + - ./sonarr/ks.yaml + - ./prowlarr/ks.yaml + - ./plex/ks.yaml + - ./notifiarr/ks.yaml + - ./overseerr/ks.yaml + - ./homepage/ks.yaml + - ./nodered/ks.yaml diff --git a/kubernetes/apps/default/namespace.yaml b/kubernetes/apps/default/namespace.yaml new file mode 100644 index 00000000..f659b055 --- /dev/null +++ b/kubernetes/apps/default/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: default + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/default/nodered/app/configs/settings.js b/kubernetes/apps/default/nodered/app/configs/settings.js new file mode 100644 index 00000000..4f776678 --- /dev/null +++ b/kubernetes/apps/default/nodered/app/configs/settings.js @@ -0,0 +1,542 @@ +/** + * This is the default settings file provided by Node-RED. + * + * It can contain any valid JavaScript code that will get run when Node-RED + * is started. + * + * Lines that start with // are commented out. + * Each entry should be separated from the entries above and below by a comma ',' + * + * For more information about individual settings, refer to the documentation: + * https://nodered.org/docs/user-guide/runtime/configuration + * + * The settings are split into the following sections: + * - Flow File and User Directory Settings + * - Security + * - Server Settings + * - Runtime Settings + * - Editor Settings + * - Node Settings + * + **/ + +module.exports = { + + /******************************************************************************* + * Flow File and User Directory Settings + * - flowFile + * - credentialSecret + * - flowFilePretty + * - userDir + * - nodesDir + ******************************************************************************/ + + /** The file containing the flows. If not set, defaults to flows_.json **/ + flowFile: 'flows.json', + + /** By default, credentials are encrypted in storage using a generated key. To + * specify your own secret, set the following property. + * If you want to disable encryption of credentials, set this property to false. + * Note: once you set this property, do not change it - doing so will prevent + * node-red from being able to decrypt your existing credentials and they will be + * lost. + */ + credentialSecret: process.env.NODE_RED_CREDENTIAL_SECRET, + + /** By default, the flow JSON will be formatted over multiple lines making + * it easier to compare changes when using version control. + * To disable pretty-printing of the JSON set the following property to false. + */ + flowFilePretty: true, + + /** By default, all user data is stored in a directory called `.node-red` under + * the user's home directory. To use a different location, the following + * property can be used + */ + //userDir: '/home/nol/.node-red/', + + /** Node-RED scans the `nodes` directory in the userDir to find local node files. + * The following property can be used to specify an additional directory to scan. 
+ */ + //nodesDir: '/home/nol/.node-red/nodes', + + /******************************************************************************* + * Security + * - adminAuth + * - https + * - httpsRefreshInterval + * - requireHttps + * - httpNodeAuth + * - httpStaticAuth + ******************************************************************************/ + + /** To password protect the Node-RED editor and admin API, the following + * property can be used. See http://nodered.org/docs/security.html for details. + */ + //adminAuth: { + // type: "credentials", + // users: [{ + // username: "admin", + // password: "$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN.", + // permissions: "*" + // }] + //}, + + /** The following property can be used to enable HTTPS + * This property can be either an object, containing both a (private) key + * and a (public) certificate, or a function that returns such an object. + * See http://nodejs.org/api/https.html#https_https_createserver_options_requestlistener + * for details of its contents. + */ + + /** Option 1: static object */ + //https: { + // key: require("fs").readFileSync('privkey.pem'), + // cert: require("fs").readFileSync('cert.pem') + //}, + + /** Option 2: function that returns the HTTP configuration object */ + // https: function() { + // // This function should return the options object, or a Promise + // // that resolves to the options object + // return { + // key: require("fs").readFileSync('privkey.pem'), + // cert: require("fs").readFileSync('cert.pem') + // } + // }, + + /** If the `https` setting is a function, the following setting can be used + * to set how often, in hours, the function will be called. That can be used + * to refresh any certificates. + */ + //httpsRefreshInterval : 12, + + /** The following property can be used to cause insecure HTTP connections to + * be redirected to HTTPS. + */ + //requireHttps: true, + + /** To password protect the node-defined HTTP endpoints (httpNodeRoot), + * including node-red-dashboard, or the static content (httpStatic), the + * following properties can be used. + * The `pass` field is a bcrypt hash of the password. + * See http://nodered.org/docs/security.html#generating-the-password-hash + */ + //httpNodeAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, + //httpStaticAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, + + /******************************************************************************* + * Server Settings + * - uiPort + * - uiHost + * - apiMaxLength + * - httpServerOptions + * - httpAdminRoot + * - httpAdminMiddleware + * - httpNodeRoot + * - httpNodeCors + * - httpNodeMiddleware + * - httpStatic + * - httpStaticRoot + ******************************************************************************/ + + /** the tcp port that the Node-RED web server is listening on */ + uiPort: process.env.PORT || 1880, + + /** By default, the Node-RED UI accepts connections on all IPv4 interfaces. + * To listen on all IPv6 addresses, set uiHost to "::", + * The following property can be used to listen on a specific interface. For + * example, the following would only allow connections from the local machine. + */ + //uiHost: "127.0.0.1", + + /** The maximum size of HTTP request that will be accepted by the runtime api. + * Default: 5mb + */ + //apiMaxLength: '5mb', + + /** The following property can be used to pass custom options to the Express.js + * server used by Node-RED. 
For a full list of available options, refer + * to http://expressjs.com/en/api.html#app.settings.table + */ + //httpServerOptions: { }, + + /** By default, the Node-RED UI is available at http://localhost:1880/ + * The following property can be used to specify a different root path. + * If set to false, this is disabled. + */ + //httpAdminRoot: '/admin', + + /** The following property can be used to add a custom middleware function + * in front of all admin http routes. For example, to set custom http + * headers. It can be a single function or an array of middleware functions. + */ + // httpAdminMiddleware: function(req,res,next) { + // // Set the X-Frame-Options header to limit where the editor + // // can be embedded + // //res.set('X-Frame-Options', 'sameorigin'); + // next(); + // }, + + + /** Some nodes, such as HTTP In, can be used to listen for incoming http requests. + * By default, these are served relative to '/'. The following property + * can be used to specifiy a different root path. If set to false, this is + * disabled. + */ + //httpNodeRoot: '/red-nodes', + + /** The following property can be used to configure cross-origin resource sharing + * in the HTTP nodes. + * See https://github.com/troygoode/node-cors#configuration-options for + * details on its contents. The following is a basic permissive set of options: + */ + //httpNodeCors: { + // origin: "*", + // methods: "GET,PUT,POST,DELETE" + //}, + + /** If you need to set an http proxy please set an environment variable + * called http_proxy (or HTTP_PROXY) outside of Node-RED in the operating system. + * For example - http_proxy=http://myproxy.com:8080 + * (Setting it here will have no effect) + * You may also specify no_proxy (or NO_PROXY) to supply a comma separated + * list of domains to not proxy, eg - no_proxy=.acme.co,.acme.co.uk + */ + + /** The following property can be used to add a custom middleware function + * in front of all http in nodes. This allows custom authentication to be + * applied to all http in nodes, or any other sort of common request processing. + * It can be a single function or an array of middleware functions. + */ + //httpNodeMiddleware: function(req,res,next) { + // // Handle/reject the request, or pass it on to the http in node by calling next(); + // // Optionally skip our rawBodyParser by setting this to true; + // //req.skipRawBodyParser = true; + // next(); + //}, + + /** When httpAdminRoot is used to move the UI to a different root path, the + * following property can be used to identify a directory of static content + * that should be served at http://localhost:1880/. + * When httpStaticRoot is set differently to httpAdminRoot, there is no need + * to move httpAdminRoot + */ + //httpStatic: '/home/nol/node-red-static/', //single static source + /* OR multiple static sources can be created using an array of objects... */ + //httpStatic: [ + // {path: '/home/nol/pics/', root: "/img/"}, + // {path: '/home/nol/reports/', root: "/doc/"}, + //], + + /** + * All static routes will be appended to httpStaticRoot + * e.g. if httpStatic = "/home/nol/docs" and httpStaticRoot = "/static/" + * then "/home/nol/docs" will be served at "/static/" + * e.g. 
if httpStatic = [{path: '/home/nol/pics/', root: "/img/"}] + * and httpStaticRoot = "/static/" + * then "/home/nol/pics/" will be served at "/static/img/" + */ + //httpStaticRoot: '/static/', + + /******************************************************************************* + * Runtime Settings + * - lang + * - runtimeState + * - diagnostics + * - logging + * - contextStorage + * - exportGlobalContextKeys + * - externalModules + ******************************************************************************/ + + /** Uncomment the following to run node-red in your preferred language. + * Available languages include: en-US (default), ja, de, zh-CN, zh-TW, ru, ko + * Some languages are more complete than others. + */ + // lang: "de", + + /** Configure diagnostics options + * - enabled: When `enabled` is `true` (or unset), diagnostics data will + * be available at http://localhost:1880/diagnostics + * - ui: When `ui` is `true` (or unset), the action `show-system-info` will + * be available to logged in users of node-red editor + */ + diagnostics: { + /** enable or disable diagnostics endpoint. Must be set to `false` to disable */ + enabled: true, + /** enable or disable diagnostics display in the node-red editor. Must be set to `false` to disable */ + ui: true, + }, + /** Configure runtimeState options + * - enabled: When `enabled` is `true` flows runtime can be Started/Stoped + * by POSTing to available at http://localhost:1880/flows/state + * - ui: When `ui` is `true`, the action `core:start-flows` and + * `core:stop-flows` will be available to logged in users of node-red editor + * Also, the deploy menu (when set to default) will show a stop or start button + */ + runtimeState: { + /** enable or disable flows/state endpoint. Must be set to `false` to disable */ + enabled: false, + /** show or hide runtime stop/start options in the node-red editor. Must be set to `false` to hide */ + ui: false, + }, + /** Configure the logging output */ + logging: { + /** Only console logging is currently supported */ + console: { + /** Level of logging to be recorded. Options are: + * fatal - only those errors which make the application unusable should be recorded + * error - record errors which are deemed fatal for a particular request + fatal errors + * warn - record problems which are non fatal + errors + fatal errors + * info - record information about the general running of the application + warn + error + fatal errors + * debug - record information which is more verbose than info + info + warn + error + fatal errors + * trace - record very detailed logging + debug + info + warn + error + fatal errors + * off - turn off all logging (doesn't affect metrics or audit) + */ + level: "info", + /** Whether or not to include metric events in the log output */ + metrics: false, + /** Whether or not to include audit events in the log output */ + audit: false + } + }, + + /** Context Storage + * The following property can be used to enable context storage. The configuration + * provided here will enable file-based context that flushes to disk every 30 seconds. + * Refer to the documentation for further options: https://nodered.org/docs/api/context/ + */ + //contextStorage: { + // default: { + // module:"localfilesystem" + // }, + //}, + + /** `global.keys()` returns a list of all properties set in global context. + * This allows them to be displayed in the Context Sidebar within the editor. + * In some circumstances it is not desirable to expose them to the editor. 
The + * following property can be used to hide any property set in `functionGlobalContext` + * from being list by `global.keys()`. + * By default, the property is set to false to avoid accidental exposure of + * their values. Setting this to true will cause the keys to be listed. + */ + exportGlobalContextKeys: false, + + /** Configure how the runtime will handle external npm modules. + * This covers: + * - whether the editor will allow new node modules to be installed + * - whether nodes, such as the Function node are allowed to have their + * own dynamically configured dependencies. + * The allow/denyList options can be used to limit what modules the runtime + * will install/load. It can use '*' as a wildcard that matches anything. + */ + externalModules: { + // autoInstall: false, /** Whether the runtime will attempt to automatically install missing modules */ + // autoInstallRetry: 30, /** Interval, in seconds, between reinstall attempts */ + // palette: { /** Configuration for the Palette Manager */ + // allowInstall: true, /** Enable the Palette Manager in the editor */ + // allowUpdate: true, /** Allow modules to be updated in the Palette Manager */ + // allowUpload: true, /** Allow module tgz files to be uploaded and installed */ + // allowList: ['*'], + // denyList: [], + // allowUpdateList: ['*'], + // denyUpdateList: [] + // }, + // modules: { /** Configuration for node-specified modules */ + // allowInstall: true, + // allowList: [], + // denyList: [] + // } + }, + + + /******************************************************************************* + * Editor Settings + * - disableEditor + * - editorTheme + ******************************************************************************/ + + /** The following property can be used to disable the editor. The admin API + * is not affected by this option. To disable both the editor and the admin + * API, use either the httpRoot or httpAdminRoot properties + */ + //disableEditor: false, + + /** Customising the editor + * See https://nodered.org/docs/user-guide/runtime/configuration#editor-themes + * for all available options. + */ + editorTheme: { + /** The following property can be used to set a custom theme for the editor. + * See https://github.com/node-red-contrib-themes/theme-collection for + * a collection of themes to chose from. + */ + //theme: "", + + /** To disable the 'Welcome to Node-RED' tour that is displayed the first + * time you access the editor for each release of Node-RED, set this to false + */ + //tours: false, + + palette: { + /** The following property can be used to order the categories in the editor + * palette. If a node's category is not in the list, the category will get + * added to the end of the palette. + * If not set, the following default order is used: + */ + //categories: ['subflows', 'common', 'function', 'network', 'sequence', 'parser', 'storage'], + }, + + projects: { + /** To enable the Projects feature, set this value to true */ + enabled: false, + workflow: { + /** Set the default projects workflow mode. + * - manual - you must manually commit changes + * - auto - changes are automatically committed + * This can be overridden per-user from the 'Git config' + * section of 'User Settings' within the editor + */ + mode: "manual" + } + }, + + codeEditor: { + /** Select the text editor component used by the editor. 
+ * As of Node-RED V3, this defaults to "monaco", but can be set to "ace" if desired + */ + lib: "monaco", + options: { + /** The follow options only apply if the editor is set to "monaco" + * + * theme - must match the file name of a theme in + * packages/node_modules/@node-red/editor-client/src/vendor/monaco/dist/theme + * e.g. "tomorrow-night", "upstream-sunburst", "github", "my-theme" + */ + // theme: "vs", + /** other overrides can be set e.g. fontSize, fontFamily, fontLigatures etc. + * for the full list, see https://microsoft.github.io/monaco-editor/api/interfaces/monaco.editor.IStandaloneEditorConstructionOptions.html + */ + //fontSize: 14, + //fontFamily: "Cascadia Code, Fira Code, Consolas, 'Courier New', monospace", + //fontLigatures: true, + } + } + }, + + /******************************************************************************* + * Node Settings + * - fileWorkingDirectory + * - functionGlobalContext + * - functionExternalModules + * - nodeMessageBufferMaxLength + * - ui (for use with Node-RED Dashboard) + * - debugUseColors + * - debugMaxLength + * - execMaxBufferSize + * - httpRequestTimeout + * - mqttReconnectTime + * - serialReconnectTime + * - socketReconnectTime + * - socketTimeout + * - tcpMsgQueueSize + * - inboundWebSocketTimeout + * - tlsConfigDisableLocalFiles + * - webSocketNodeVerifyClient + ******************************************************************************/ + + /** The working directory to handle relative file paths from within the File nodes + * defaults to the working directory of the Node-RED process. + */ + //fileWorkingDirectory: "", + + /** Allow the Function node to load additional npm modules directly */ + functionExternalModules: true, + + /** The following property can be used to set predefined values in Global Context. + * This allows extra node modules to be made available with in Function node. + * For example, the following: + * functionGlobalContext: { os:require('os') } + * will allow the `os` module to be accessed in a Function node using: + * global.get("os") + */ + functionGlobalContext: { + // os:require('os'), + }, + + /** The maximum number of messages nodes will buffer internally as part of their + * operation. This applies across a range of nodes that operate on message sequences. + * defaults to no limit. A value of 0 also means no limit is applied. + */ + //nodeMessageBufferMaxLength: 0, + + /** If you installed the optional node-red-dashboard you can set it's path + * relative to httpNodeRoot + * Other optional properties include + * readOnly:{boolean}, + * middleware:{function or array}, (req,res,next) - http middleware + * ioMiddleware:{function or array}, (socket,next) - socket.io middleware + */ + //ui: { path: "ui" }, + + /** Colourise the console output of the debug node */ + //debugUseColors: true, + + /** The maximum length, in characters, of any message sent to the debug sidebar tab */ + debugMaxLength: 1000, + + /** Maximum buffer size for the exec node. Defaults to 10Mb */ + //execMaxBufferSize: 10000000, + + /** Timeout in milliseconds for HTTP request connections. Defaults to 120s */ + //httpRequestTimeout: 120000, + + /** Retry time in milliseconds for MQTT connections */ + mqttReconnectTime: 15000, + + /** Retry time in milliseconds for Serial port connections */ + serialReconnectTime: 15000, + + /** Retry time in milliseconds for TCP socket connections */ + //socketReconnectTime: 10000, + + /** Timeout in milliseconds for TCP server socket connections. 
Defaults to no timeout */ + //socketTimeout: 120000, + + /** Maximum number of messages to wait in queue while attempting to connect to TCP socket + * defaults to 1000 + */ + //tcpMsgQueueSize: 2000, + + /** Timeout in milliseconds for inbound WebSocket connections that do not + * match any configured node. Defaults to 5000 + */ + //inboundWebSocketTimeout: 5000, + + /** To disable the option for using local files for storing keys and + * certificates in the TLS configuration node, set this to true. + */ + //tlsConfigDisableLocalFiles: true, + + /** The following property can be used to verify websocket connection attempts. + * This allows, for example, the HTTP request headers to be checked to ensure + * they include valid authentication information. + */ + //webSocketNodeVerifyClient: function(info) { + // /** 'info' has three properties: + // * - origin : the value in the Origin header + // * - req : the HTTP request + // * - secure : true if req.connection.authorized or req.connection.encrypted is set + // * + // * The function should return true if the connection should be accepted, false otherwise. + // * + // * Alternatively, if this function is defined to accept a second argument, callback, + // * it can be used to verify the client asynchronously. + // * The callback takes three arguments: + // * - result : boolean, whether to accept the connection or not + // * - code : if result is false, the HTTP error status to return + // * - reason: if result is false, the HTTP reason string to return + // */ + //}, +} diff --git a/kubernetes/apps/default/nodered/app/externalsecret.yaml b/kubernetes/apps/default/nodered/app/externalsecret.yaml new file mode 100644 index 00000000..87b6ec9c --- /dev/null +++ b/kubernetes/apps/default/nodered/app/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: nodered +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: nodered-secret + template: + engineVersion: v2 + data: + NODE_RED_CREDENTIAL_SECRET: "{{ .CREDENTIAL_SECRET }}" + dataFrom: + - extract: + key: nodered diff --git a/kubernetes/apps/default/nodered/app/helmrelease.yaml b/kubernetes/apps/default/nodered/app/helmrelease.yaml new file mode 100644 index 00000000..7b21b236 --- /dev/null +++ b/kubernetes/apps/default/nodered/app/helmrelease.yaml @@ -0,0 +1,91 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app nodered + namespace: default +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + nodered: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/nodered/node-red + tag: 3.1.9@sha256:df827e6ee450221ff68e2edd3bf9b43a991f6abb49121b0db95b744ad4e17a8c + env: + PUID: 1000 + PGID: 1000 + UMASK: 002 + TZ: "${TIMEZONE}" + envFrom: + - secretRef: + name: nodered-secret + pod: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + 
service: + app: + controller: nodered + ports: + http: + port: 1880 + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: node-red.png + gethomepage.dev/name: Node-red + gethomepage.dev/description: Visual programming for automation + gethomepage.dev/group: Home + hosts: + - host: "nodered.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: +# config: +# existingClaim: *app +# globalMounts: +# - path: /data +# settings: +# type: configMap +# name: node-red-configmap +# globalMounts: +# - path: /data/settings.js +# subPath: settings.js +# readOnly: true + downloads: + type: nfs + server: "${NAS_URL}" + path: "/volume1/rpi/docker/Appdata/nodered" + globalMounts: + - path: /data diff --git a/kubernetes/apps/default/nodered/app/kustomization.yaml b/kubernetes/apps/default/nodered/app/kustomization.yaml new file mode 100644 index 00000000..fcbef500 --- /dev/null +++ b/kubernetes/apps/default/nodered/app/kustomization.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/internal +configMapGenerator: + - name: node-red-configmap + files: + - ./configs/settings.js +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/default/nodered/ks.yaml b/kubernetes/apps/default/nodered/ks.yaml new file mode 100644 index 00000000..6cb864b4 --- /dev/null +++ b/kubernetes/apps/default/nodered/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nodered + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/default/nodered/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/default/notifiarr/app/externalsecret.yaml b/kubernetes/apps/default/notifiarr/app/externalsecret.yaml new file mode 100644 index 00000000..4e226f96 --- /dev/null +++ b/kubernetes/apps/default/notifiarr/app/externalsecret.yaml @@ -0,0 +1,48 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: notifiarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: notifiarr-secret + template: + engineVersion: v2 + data: + DN_API_KEY: "{{ .notifiarr_api_key }}" + DN_UI_PASSWORD: "{{ .notifiarr_password }}" + DN_PLEX_URL: "http://plex.default.svc.cluster.local:32400" + DN_PLEX_TOKEN: "{{ .PLEX_TOKEN }}" + DN_QBIT_0_NAME: "qbittorrent-kube" + DN_QBIT_0_URL: "http://qbittorrent.default.svc.cluster.local:8080" + DN_QBIT_0_USER: "{{ .qbittorrent_username }}" + DN_QBIT_0_PASS: "{{ .qbittorrent_password }}" + DN_RADARR_0_NAME: "radarr-kube" + DN_RADARR_0_URL: "http://radarr.default.svc.cluster.local:8080" + DN_RADARR_0_API_KEY: "{{ .RADARR_API_KEY }}" + DN_SONARR_0_NAME: "sonarr-kube" + 
DN_SONARR_0_URL: "http://sonarr.default.svc.cluster.local:8080" + DN_SONARR_0_API_KEY: "{{ .SONARR_API_KEY }}" + dataFrom: + - extract: + key: qbittorrent + rewrite: + - regexp: + source: "(.*)" + target: "qbittorrent_$1" + - extract: + key: notifiarr + rewrite: + - regexp: + source: "(.*)" + target: "notifiarr_$1" + - extract: + key: radarr + - extract: + key: sonarr + - extract: + key: plex diff --git a/kubernetes/apps/default/notifiarr/app/helmrelease.yaml b/kubernetes/apps/default/notifiarr/app/helmrelease.yaml new file mode 100644 index 00000000..15ba4683 --- /dev/null +++ b/kubernetes/apps/default/notifiarr/app/helmrelease.yaml @@ -0,0 +1,75 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: notifiarr + namespace: default +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + annotations: + reloader.stakater.com/auto: "true" + defaultPodOptions: + hostname: notifiarr-kube + controllers: + notifiarr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: golift/notifiarr + tag: 0.7@sha256:c7a21fcf4ae2d5035c3302debde257bf6b3338b768d5678efb59e093a246c515 + env: + PUID: 1000 + PGID: 1000 + UMASK: 002 + TZ: "${TIMEZONE}" + envFrom: + - secretRef: + name: notifiarr-secret + service: + app: + controller: notifiarr + ports: + http: + port: 5454 + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: notifiarr.png + gethomepage.dev/name: notifiarr + gethomepage.dev/group: Media + hosts: + - host: "notifiarr.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http diff --git a/kubernetes/apps/default/notifiarr/app/kustomization.yaml b/kubernetes/apps/default/notifiarr/app/kustomization.yaml new file mode 100644 index 00000000..17eb4a00 --- /dev/null +++ b/kubernetes/apps/default/notifiarr/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ../../../../templates/gatus/internal diff --git a/kubernetes/apps/default/notifiarr/ks.yaml b/kubernetes/apps/default/notifiarr/ks.yaml new file mode 100644 index 00000000..dad4abd9 --- /dev/null +++ b/kubernetes/apps/default/notifiarr/ks.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app notifiarr + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/default/notifiarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git 
a/kubernetes/apps/default/overseerr/app/helmrelease.yaml b/kubernetes/apps/default/overseerr/app/helmrelease.yaml new file mode 100644 index 00000000..0cd40d8b --- /dev/null +++ b/kubernetes/apps/default/overseerr/app/helmrelease.yaml @@ -0,0 +1,115 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app overseerr +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + controllers: + overseerr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/sct/overseerr + tag: 1.33.2@sha256:714ea6db2bc007a2262d112bef7eec74972eb33d9c72bddb9cbd98b8742de950 + env: + TZ: "${TIMEZONE}" + LOG_LEVEL: "info" + PORT: &port 80 + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /api/v1/status + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 2Gi + pod: + securityContext: + runAsUser: 568 + runAsGroup: 568 + runAsNonRoot: true + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + service: + app: + controller: overseerr + ports: + http: + port: *port + ingress: + app: + className: external + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/group: Media + gethomepage.dev/name: Overseerr + gethomepage.dev/icon: overseerr.png + gethomepage.dev/description: Media Request Management + gethomepage.dev/widget.type: overseerr + gethomepage.dev/widget.url: http://overseerr.default.svc.cluster.local + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_OVERSEERR_TOKEN}}" + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: "overseerr.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /app/config + cache: + existingClaim: overseerr-cache + globalMounts: + - path: /app/config/cache + logs: + type: emptyDir + globalMounts: + - path: /app/config/logs + tmp: + type: emptyDir diff --git a/kubernetes/apps/default/overseerr/app/kustomization.yaml b/kubernetes/apps/default/overseerr/app/kustomization.yaml new file mode 100644 index 00000000..a3f6169d --- /dev/null +++ b/kubernetes/apps/default/overseerr/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pvc.yaml + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/external diff --git a/kubernetes/apps/default/overseerr/app/pvc.yaml b/kubernetes/apps/default/overseerr/app/pvc.yaml new file mode 100644 index 00000000..75f45ae8 --- /dev/null +++ b/kubernetes/apps/default/overseerr/app/pvc.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: overseerr-cache +spec: + accessModes: ["ReadWriteOnce"] + 
resources: + requests: + storage: 3Gi + storageClassName: longhorn diff --git a/kubernetes/apps/default/overseerr/ks.yaml b/kubernetes/apps/default/overseerr/ks.yaml new file mode 100644 index 00000000..d37b1274 --- /dev/null +++ b/kubernetes/apps/default/overseerr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app overseerr + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/overseerr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/default/paperless/app/helmrelease.yaml b/kubernetes/apps/default/paperless/app/helmrelease.yaml new file mode 100644 index 00000000..dca4cdae --- /dev/null +++ b/kubernetes/apps/default/paperless/app/helmrelease.yaml @@ -0,0 +1,87 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app paperless + namespace: default +spec: + interval: 30m + driftDetection: + mode: enabled + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + controllers: + paperless: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/paperless-ngx/paperless-ngx + tag: 2.7.2@sha256:703c990a790dfd4d25fb56df3afec27b13cb0926a3818bf265edac9c71311647 + env: + PAPERLESS_TIME_ZONE: "${TIMEZONE}" + PAPERLESS_PORT: &port 8080 + resources: + requests: + cpu: 11m + memory: 500Mi + limits: + memory: 2000Mi + service: + app: + controller: paperless + ports: + http: + port: *port + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: paperless-ngx.png + gethomepage.dev/name: Paperless + gethomepage.dev/group: Storage + gethomepage.dev/description: Document management + gethomepage.dev/widget.type: paperless + gethomepage.dev/widget.url: http://paperless.default.svc.cluster.local:8080 + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_PAPERLESS_TOKEN}}" + hosts: + - host: "paperless.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /data/local + downloads: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}/paperless" + globalMounts: + - path: /data/nas diff --git a/kubernetes/apps/default/paperless/app/kustomization.yaml b/kubernetes/apps/default/paperless/app/kustomization.yaml new file mode 100644 index 00000000..82c34407 --- /dev/null +++ b/kubernetes/apps/default/paperless/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/internal diff
--git a/kubernetes/apps/default/paperless/ks.yaml b/kubernetes/apps/default/paperless/ks.yaml new file mode 100644 index 00000000..14f312cb --- /dev/null +++ b/kubernetes/apps/default/paperless/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app paperless + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/paperless/app + prune: true + dependsOn: + - name: external-secrets-stores + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/default/plex/app/helmrelease.yaml b/kubernetes/apps/default/plex/app/helmrelease.yaml new file mode 100644 index 00000000..e9f84927 --- /dev/null +++ b/kubernetes/apps/default/plex/app/helmrelease.yaml @@ -0,0 +1,125 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app plex +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + dependsOn: + - name: intel-device-plugin-gpu + namespace: kube-system + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + controllers: + plex: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/onedr0p/plex + tag: 1.40.0.7998-c29d4c0c8@sha256:7c4501799f0d5f4f94fcb95a8a47b883528354c779a182a9ae4af118a1fc6b10 + env: + TZ: "${TIMEZONE}" + PLEX_ADVERTISE_URL: "https://plex.${SECRET_DOMAIN}:443,http://${CLUSTER_LB_PLEX}:32400" + PLEX_NO_AUTH_NETWORKS: 192.168.10.0/24 + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /identity + port: 32400 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + resources: + requests: + cpu: 100m + limits: + gpu.intel.com/i915: 1 + memory: 8Gi + pod: + nodeSelector: + intel.feature.node.kubernetes.io/gpu: "true" + securityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + supplementalGroups: [ 44, 107, 1000 ] + service: + app: + controller: plex + type: LoadBalancer + annotations: + io.cilium/lb-ipam-ips: "${CLUSTER_LB_PLEX}" + ports: + http: + port: 32400 + ingress: + app: + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + gethomepage.dev/enabled: "true" + gethomepage.dev/group: Media + gethomepage.dev/name: Plex + gethomepage.dev/icon: plex.png + gethomepage.dev/description: Media Player + gethomepage.dev/widget.type: plex + gethomepage.dev/widget.url: http://plex.default.svc.cluster.local:32400 + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_PLEX_TOKEN}}" + hosts: + - host: "plex.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /config + # Separate PVC for 
cache to avoid backing up cache files + plex-cache: + existingClaim: plex-cache + globalMounts: + - path: /config/Library/Application Support/Plex Media Server/Cache + tmp: + type: emptyDir + transcode: + type: emptyDir + media: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}/media" + globalMounts: + - path: /media + readOnly: true diff --git a/kubernetes/apps/default/plex/app/kustomization.yaml b/kubernetes/apps/default/plex/app/kustomization.yaml new file mode 100644 index 00000000..a3f6169d --- /dev/null +++ b/kubernetes/apps/default/plex/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pvc.yaml + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/external diff --git a/kubernetes/apps/default/plex/app/pvc.yaml b/kubernetes/apps/default/plex/app/pvc.yaml new file mode 100644 index 00000000..54881821 --- /dev/null +++ b/kubernetes/apps/default/plex/app/pvc.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: plex-cache +spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + storageClassName: longhorn diff --git a/kubernetes/apps/default/plex/ks.yaml b/kubernetes/apps/default/plex/ks.yaml new file mode 100644 index 00000000..c8818812 --- /dev/null +++ b/kubernetes/apps/default/plex/ks.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app plex + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/plex/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /web/index.html + VOLSYNC_CAPACITY: 10Gi diff --git a/kubernetes/apps/default/prowlarr/app/externalsecret.yaml b/kubernetes/apps/default/prowlarr/app/externalsecret.yaml new file mode 100644 index 00000000..f69940c4 --- /dev/null +++ b/kubernetes/apps/default/prowlarr/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: prowlarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: prowlarr-secret + template: + engineVersion: v2 + data: + PROWLARR__API_KEY: "{{ .PROWLARR_API_KEY }}" + PROWLARR__POSTGRES_HOST: &dbHost postgres-cluster-rw.database.svc.cluster.local + PROWLARR__POSTGRES_PORT: "5432" + PROWLARR__POSTGRES_USER: &dbUser "{{ .PROWLARR_POSTGRES_USER }}" + PROWLARR__POSTGRES_PASSWORD: &dbPass "{{ .PROWLARR_POSTGRES_PASSWORD }}" + PROWLARR__POSTGRES_MAIN_DB: prowlarr_main + PROWLARR__POSTGRES_LOG_DB: prowlarr_log + INIT_POSTGRES_DBNAME: prowlarr_main prowlarr_log + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_USER: "{{ .POSTGRES_SUPER_USER }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: prowlarr + - extract: + key: cloudnative-pg diff --git a/kubernetes/apps/default/prowlarr/app/helmrelease.yaml
b/kubernetes/apps/default/prowlarr/app/helmrelease.yaml new file mode 100644 index 00000000..cea636ce --- /dev/null +++ b/kubernetes/apps/default/prowlarr/app/helmrelease.yaml @@ -0,0 +1,106 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app prowlarr + namespace: default +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + defaultPodOptions: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + controllers: + prowlarr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16 + envFrom: &envFrom + - secretRef: + name: prowlarr-secret + containers: + app: + image: + repository: ghcr.io/onedr0p/prowlarr + tag: 1.17@sha256:da8fba1ef93d8013b86ea4d9fc4ccea7433db5ed79dc8d7fa12fe6d4374f0412 + env: + TZ: "${TIMEZONE}" + PROWLARR__INSTANCE_NAME: Prowlarr + PROWLARR__PORT: &port 8080 + PROWLARR__LOG_LEVEL: info + PROWLARR__AUTHENTICATION_METHOD: External + PROWLARR__THEME: dark + envFrom: *envFrom + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + memory: 500Mi + service: + app: + controller: prowlarr + ports: + http: + port: *port + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: prowlarr.png + gethomepage.dev/name: Prowlarr + gethomepage.dev/group: Media + gethomepage.dev/description: Torrent/NZB Indexer Management + gethomepage.dev/widget.type: prowlarr + gethomepage.dev/widget.url: http://prowlarr.default.svc.cluster.local:8080 + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_PROWLARR_TOKEN}}" + hosts: + - host: "prowlarr.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /config + downloads: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}" + globalMounts: + - path: /data diff --git a/kubernetes/apps/default/prowlarr/app/kustomization.yaml b/kubernetes/apps/default/prowlarr/app/kustomization.yaml new file mode 100644 index 00000000..82b7f826 --- /dev/null +++ b/kubernetes/apps/default/prowlarr/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/internal diff --git a/kubernetes/apps/default/prowlarr/ks.yaml b/kubernetes/apps/default/prowlarr/ks.yaml new file mode 100644 index 00000000..e8cb51b5 --- /dev/null +++ b/kubernetes/apps/default/prowlarr/ks.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app prowlarr + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + 
app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/prowlarr/app + prune: true + dependsOn: + - name: cloudnative-pg + - name: external-secrets-stores + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/default/qbittorrent/app/helmrelease.yaml b/kubernetes/apps/default/qbittorrent/app/helmrelease.yaml new file mode 100644 index 00000000..fabe66a4 --- /dev/null +++ b/kubernetes/apps/default/qbittorrent/app/helmrelease.yaml @@ -0,0 +1,157 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app qbittorrent + namespace: default +spec: + interval: 30m + driftDetection: + mode: enabled + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + defaultPodOptions: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + controllers: + qbittorrent: + initContainers: + init-categories: + image: + repository: docker.io/library/alpine + tag: 3.19.1 + command: + - "sh" + - "-c" + - | + mkdir -p /config/qBittorrent/ && + echo '{"movies": {"save_path": "/data/torrents/movies"}, "tv": {"save_path": "/data/torrents/tv"}}' > /config/qBittorrent/categories.json + containers: + app: + image: + repository: ghcr.io/onedr0p/qbittorrent + tag: 4.6.4@sha256:53ead5ab43027d04efc5d52740aa02308a88d6b4a6eaa90cf6fd2e94fc11ba17 + env: + TZ: "${TIMEZONE}" + QBITTORRENT__PORT: &port 8080 + QBITTORRENT__BT_PORT: &port-bt 58462 + QBT_Preferences__Downloads__SavePath: /data/torrents + QBT_Preferences__WebUI__Password_PBKDF2: "@ByteArray(iXranQkCEwRqp96g0yKHHA==:2ujiTbO+e12jHqzAJccPqjrBcVmRhaTSrrMi27VRiv2rbWk50twuRcHBCc8jsX/J/oZ8JQnBzHFjNzZ2bvpZkQ==)" + QBT_Preferences__WebUI__AlternativeUIEnabled: 'true' + QBT_Preferences__WebUI__RootFolder: '/add-ons/VueTorrent' + QBT_Preferences__WebUI__LocalHostAuth: false + QBT_Preferences__WebUI__UseUPNP: false + QBT_Preferences__WebUI__CSRFProtection: false + QBT_Preferences__WebUI__ClickjackingProtection: false + QBT_Preferences__WebUI__AuthSubnetWhitelistEnabled: true + QBT_Preferences__WebUI__AuthSubnetWhitelist: |- + 10.42.0.0/16, 192.168.10.0/24, 192.168.20.0/24 + QBT_BitTorrent__Session__AlternativeGlobalDLSpeedLimit: 20000 + QBT_BitTorrent__Session__AlternativeGlobalUPSpeedLimit: 0 + QBT_BitTorrent__Session__GlobalUPSpeedLimit: 0 + QBT_BitTorrent__Session__GlobalDLSpeedLimit: 2500 + QBT_BitTorrent__Session__UseAlternativeGlobalSpeedLimit: false + QBT_BitTorrent__Session__BandwidthSchedulerEnabled: true + QBT_BitTorrent__Session__DisableAutoTMMByDefault: false + QBT_BitTorrent__Session__TempPathEnabled: false + QBT_BitTorrent__Session__DisableAutoTMMTriggers__CategorySavePathChanged: false + QBT_BitTorrent__Session__DisableAutoTMMTriggers__DefaultSavePathChanged: false + QBT_BitTorrent__Scheduler__days: true + QBT_BitTorrent__Scheduler__start_time: '@Variant(\0\0\0\xf\0\0\0\0)' + QBT_BitTorrent__Scheduler__end_time: '@Variant(\0\0\0\xf\x1\xb7t\0)' + resources: + requests: + cpu: 500m + memory: 1Gi + 
limits: + memory: 3Gi + secondary: + dependsOn: app + image: + repository: registry.k8s.io/git-sync/git-sync + tag: v4.2.3 + args: + - --repo=https://github.com/WDaan/VueTorrent + - --ref=latest-release + - --period=86400s + - --root=/add-ons + resources: + requests: + cpu: 10m + memory: 25Mi + limits: + memory: 50Mi + service: + app: + controller: qbittorrent + type: LoadBalancer + annotations: + io.cilium/lb-ipam-ips: "${CLUSTER_LB_QBITTORRENT}" + ports: + http: + port: *port + bittorrent: + enabled: true + port: *port-bt + protocol: TCP + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/group: Media + gethomepage.dev/name: qBittorrent + gethomepage.dev/icon: qbittorrent.png + gethomepage.dev/description: Torrent Client + gethomepage.dev/widget.type: qbittorrent + gethomepage.dev/widget.url: http://qbittorrent.default.svc.cluster.local:8080 + gethomepage.dev/widget.username: "{{HOMEPAGE_VAR_QBITTORRENT_USERNAME}}" + gethomepage.dev/widget.password: "{{HOMEPAGE_VAR_QBITTORRENT_PASSWORD}}" + hosts: + - host: "torrent.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /config + downloads: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}" + globalMounts: + - path: /data + add-ons: + enabled: true + type: emptyDir + globalMounts: + - path: /add-ons diff --git a/kubernetes/apps/default/qbittorrent/app/kustomization.yaml b/kubernetes/apps/default/qbittorrent/app/kustomization.yaml new file mode 100644 index 00000000..82c34407 --- /dev/null +++ b/kubernetes/apps/default/qbittorrent/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/internal diff --git a/kubernetes/apps/default/qbittorrent/ks.yaml b/kubernetes/apps/default/qbittorrent/ks.yaml new file mode 100644 index 00000000..7e093546 --- /dev/null +++ b/kubernetes/apps/default/qbittorrent/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app qbittorrent + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/qbittorrent/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 2Gi diff --git a/kubernetes/apps/default/radarr/app/externalsecret.yaml b/kubernetes/apps/default/radarr/app/externalsecret.yaml new file mode 100644 index 00000000..fb563fe7 --- /dev/null +++ b/kubernetes/apps/default/radarr/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: radarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: radarr-secret + template: + engineVersion: v2 + data: + RADARR__API_KEY: "{{ .RADARR_API_KEY }}" + RADARR__POSTGRES_HOST: &dbHost postgres-cluster-rw.database.svc.cluster.local + 
RADARR__POSTGRES_PORT: "5432" + RADARR__POSTGRES_USER: &dbUser "{{ .RADARR_POSTGRES_USER }}" + RADARR__POSTGRES_PASSWORD: &dbPass "{{ .RADARR_POSTGRES_PASSWORD }}" + RADARR__POSTGRES_MAIN_DB: radarr_main + RADARR__POSTGRES_LOG_DB: radarr_log + INIT_POSTGRES_DBNAME: radarr_main radarr_log + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_USER: "{{ .POSTGRES_SUPER_USER }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: cloudnative-pg + - extract: + key: radarr diff --git a/kubernetes/apps/default/radarr/app/helmrelease.yaml b/kubernetes/apps/default/radarr/app/helmrelease.yaml new file mode 100644 index 00000000..f5313d44 --- /dev/null +++ b/kubernetes/apps/default/radarr/app/helmrelease.yaml @@ -0,0 +1,107 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app radarr + namespace: default +spec: + interval: 30m + driftDetection: + mode: enabled + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + defaultPodOptions: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + controllers: + radarr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16 + envFrom: &envFrom + - secretRef: + name: radarr-secret + containers: + app: + image: + repository: ghcr.io/onedr0p/radarr-develop + tag: 5.6.0.8846@sha256:b3137a2b451683d834627bf6997460f26eb864757b1ffb5eb6544a8ba6d432ef + env: + TZ: "${TIMEZONE}" + RADARR__INSTANCE_NAME: Radarr + RADARR__PORT: &port 8080 + RADARR__APPLICATION_URL: "https://radarr.${SECRET_DOMAIN}" + RADARR__LOG_LEVEL: info + RADARR__THEME: dark + envFrom: *envFrom + resources: + requests: + cpu: 500m + memory: 500Mi + limits: + memory: 2000Mi + service: + app: + controller: radarr + ports: + http: + port: *port + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: radarr.png + gethomepage.dev/name: Radarr + gethomepage.dev/group: Media + gethomepage.dev/description: Movie Downloads + gethomepage.dev/widget.type: radarr + gethomepage.dev/widget.url: http://radarr.default.svc.cluster.local:8080 + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_RADARR_TOKEN}}" + hosts: + - host: "radarr.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /config + downloads: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}" + globalMounts: + - path: /data diff --git a/kubernetes/apps/default/radarr/app/kustomization.yaml b/kubernetes/apps/default/radarr/app/kustomization.yaml new file mode 100644 index 00000000..82b7f826 --- /dev/null +++ b/kubernetes/apps/default/radarr/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - 
../../../../templates/volsync + - ../../../../templates/gatus/internal diff --git a/kubernetes/apps/default/radarr/ks.yaml b/kubernetes/apps/default/radarr/ks.yaml new file mode 100644 index 00000000..7bf84148 --- /dev/null +++ b/kubernetes/apps/default/radarr/ks.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app radarr + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/radarr/app + prune: true + dependsOn: + - name: cloudnative-pg + - name: external-secrets-stores + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/default/sonarr/app/externalsecret.yaml b/kubernetes/apps/default/sonarr/app/externalsecret.yaml new file mode 100644 index 00000000..3b09d9c9 --- /dev/null +++ b/kubernetes/apps/default/sonarr/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: sonarr +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: sonarr-secret + template: + engineVersion: v2 + data: + SONARR__API_KEY: "{{ .SONARR_API_KEY }}" + SONARR__POSTGRES__HOST: &dbHost postgres-cluster-rw.database.svc.cluster.local + SONARR__POSTGRES__PORT: "5432" + SONARR__POSTGRES__USER: &dbUser "{{ .SONARR_POSTGRES_USER }}" + SONARR__POSTGRES__PASSWORD: &dbPass "{{ .SONARR_POSTGRES_PASSWORD }}" + SONARR__POSTGRES__MAINDB: &dbName sonarr_main + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_USER: "{{ .POSTGRES_SUPER_USER }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: cloudnative-pg + - extract: + key: sonarr diff --git a/kubernetes/apps/default/sonarr/app/helmrelease.yaml b/kubernetes/apps/default/sonarr/app/helmrelease.yaml new file mode 100644 index 00000000..bf022fca --- /dev/null +++ b/kubernetes/apps/default/sonarr/app/helmrelease.yaml @@ -0,0 +1,109 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app sonarr + namespace: default +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + strategy: uninstall + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + values: + defaultPodOptions: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + controllers: + sonarr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16 + envFrom: &envFrom + - secretRef: + name: sonarr-secret + containers: + app: + image: + repository: 
ghcr.io/onedr0p/sonarr-develop + tag: 4.0.4.1668@sha256:0b89ee847f0b7e782386be22f964de18046e828c47d0d34eba5b3651c361eaa4 + env: + TZ: "${TIMEZONE}" + SONARR__AUTH__METHOD: External + SONARR__AUTH__REQUIRED: DisabledForLocalAddresses + SONARR__APP__INSTANCENAME: Sonarr + SONARR__SERVER__PORT: &port 8080 + SONARR__SERVER__URLBASE: "https://sonarr.${SECRET_DOMAIN}" + SONARR__LOG__DBENABLED: "False" + SONARR__LOG__LEVEL: info + SONARR__APP__THEME: dark + envFrom: *envFrom + resources: + requests: + cpu: 500m + memory: 500Mi + limits: + memory: 2000Mi + service: + app: + controller: sonarr + ports: + http: + port: *port + ingress: + app: + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: sonarr.png + gethomepage.dev/name: Sonarr + gethomepage.dev/group: Media + gethomepage.dev/description: TV Downloads + gethomepage.dev/widget.type: sonarr + gethomepage.dev/widget.url: http://sonarr.default.svc.cluster.local:8080 + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_SONARR_TOKEN}}" + hosts: + - host: "sonarr.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + existingClaim: *app + globalMounts: + - path: /config + downloads: + type: nfs + server: "${NAS_URL}" + path: "${NAS_PATH}" + globalMounts: + - path: /data diff --git a/kubernetes/apps/default/sonarr/app/kustomization.yaml b/kubernetes/apps/default/sonarr/app/kustomization.yaml new file mode 100644 index 00000000..5a11297d --- /dev/null +++ b/kubernetes/apps/default/sonarr/app/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ../../../../templates/volsync + - ../../../../templates/gatus/internal + diff --git a/kubernetes/apps/default/sonarr/ks.yaml b/kubernetes/apps/default/sonarr/ks.yaml new file mode 100644 index 00000000..0c4b222a --- /dev/null +++ b/kubernetes/apps/default/sonarr/ks.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app sonarr + namespace: flux-system +spec: + targetNamespace: default + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/default/sonarr/app + prune: true + dependsOn: + - name: cloudnative-pg + - name: external-secrets-stores + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/external-secrets/alert.yaml b/kubernetes/apps/external-secrets/alert.yaml new file mode 100644 index 00000000..ba1970d2 --- /dev/null +++ b/kubernetes/apps/external-secrets/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: external-secrets +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: 
+ name: alert-manager + namespace: external-secrets +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/external-secrets/external-secrets/app/helmrelease.yaml b/kubernetes/apps/external-secrets/external-secrets/app/helmrelease.yaml new file mode 100644 index 00000000..51b93c73 --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/app/helmrelease.yaml @@ -0,0 +1,37 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: external-secrets +spec: + interval: 30m + chart: + spec: + chart: external-secrets + version: 0.9.18 + sourceRef: + kind: HelmRepository + name: external-secrets + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + installCRDs: true + serviceMonitor: + enabled: true + interval: 1m + webhook: + serviceMonitor: + enabled: true + interval: 1m + certController: + serviceMonitor: + enabled: true + interval: 1m diff --git a/kubernetes/apps/external-secrets/external-secrets/app/kustomization.yaml b/kubernetes/apps/external-secrets/external-secrets/app/kustomization.yaml new file mode 100644 index 00000000..076bf83c --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./onepassword-connect.secret.sops.yaml diff --git a/kubernetes/apps/external-secrets/external-secrets/app/onepassword-connect.secret.sops.yaml b/kubernetes/apps/external-secrets/external-secrets/app/onepassword-connect.secret.sops.yaml new file mode 100644 index 00000000..40f873e5 --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/app/onepassword-connect.secret.sops.yaml @@ -0,0 +1,28 @@ +# yamllint disable +apiVersion: v1 +kind: Secret +metadata: + name: onepassword-connect-secret +stringData: + 1password-credentials.json: 
ENC[AES256_GCM,data:b1M1F133xoEeInGFZL0Gv+atGJfv8yLJhsOS2tvhdYmzcZO6Zt+5gl7PTQZNkbsdJqCTFfL8UyusElHH1pFOOA6SQGKPdCl37x/9DAvZ8dw2JpS5zRp8wi0VDEGSiv1U62HXi30ami+71rAI2DYGZUZJ3EKINsY6k9SEf1kARxlhJQ6/ilGsmCD1oogOJjkWWLz73Jp7pVW8jarFXRxIdG0ucy53smzuZ5GMzv5XZFcFXL75M4Y72IqMLdlLbM3q78G1o8k3pqX7uZ5ZSAMH2nvPLh9b8n/S28dTlV6C8KQzEqXy5Rp0lrOv2NuK8atLyy0IQsMp0dzGgly5Y2LOlW16be2JZ2IRb/AOZ6SJxsMjZj/d2dwsvCocp+vEd6LPC9atr5/6MDn1nV/WuHSjACcIHHFHflOX4tWvW0tPqDLa3up0h3z5omwSCXwVMXkK80hMD0F+T5iC/eDyMeeFGNYxpQuYBp/LoAxdVhemetIBQRBJwjfGHutrk9OpNs063sbhUNSKFLx0ZdevazbU/DyO3DTooO46jNhYfuIzuTPJpiKjjDKbXcX+bXUC96GE/zZR0QLcuDHLVaOPYFF9oXbxTW8rbJDbTy6NTDoXqhVeqNGIOLQdX/0eyuoo3jWSAMtCYps8Tv7cufJYpnscPT4adkBrBetHHPd4H5Q8/4JxceVnf852PTaUBT8OeB7tAI7/Dj2JfOi3RKqZGBRQx1LJ8bQz2aA2b6oiYIPwquTUKLp1UtO4CWGCN0EtkRwm5G6r53d1jLNnUV5eNJ1sk0WTluDQpxRgQcXDrW7jEgGNbWcZniJk/IiL43FBy5YH4JRotGJZTO043VHU+i6kXoFbY8kQ8Dv018u6NlEYGuSEkOdb3gJYzXRS0dt+65tR5+ZtcnJzK6usv+rwpvUm3e6fV5KeGvSb4P6GRwAZesW+vC7XzQ6EF5s8yQlbrum/ar8+oo7WIf+D51REvrEBLj1Dm5LxicQqFiI6HqfIq0DTY5jTEykBDn6RP+29zl0AEulgv5Kp8hmtrXfDSwvmakhHy88ERyFSzP+JKqD9rl8CCLkCLnshJBcJAqij7G1tsfrTiXNdaX4aSsUB/Ld5ufq9tt6ovi4yI3NuNBJ5o5ym8FjI5Y+LdfTbH/qLMwO08kXMGU6CrYk4KGNjYpcJxi0AJ04Yhpxexnc2+ERyXVmBDIISfqnubh+LuzTQJ2L6oGnuSxvqXx1NmZevZoBnJQe7hAJU4RnkPKD86GA8XPOcmbeZmtXn3K/v2iKW03VqG7rHth0Uo373lmcnmBqcsfRJOrdULopcmZBQo+F83vY9mXEZ5TGt4QskKYglwH//MUDQ1hLordJtAmL0p+d2FUb3NOnpw3ngCwUFpJZiYonnQ3Uybau56zcqD/2zWUUciB9+u3hImRvTiqk3PDZ309CmR7BSt+Rosh/h1ufFCL3qnWDrFmyesdn7qeRv3Gvr/TybmTPi13Ti3RL6Wa2g3dMniLIEvVGPSapwmpipThZ3tPgISC+hHbrjwRyx7R2XTvVYcGigEeTJErWtSK92Sa+HWZt20OB1/0bOrqcpEcZFXQG2GEaYkzOeicRyOlGE8Wwk0TKGmCnaAGnOXa3MBLxq06LD/8n6y5KW86zgbzWN6GTIVTslNZRrZPzSrY9mXsJ/s0zDN9ffb+MLK95+Qwa8EHAy727qf8jbdXaO4BmZRBdqNzUgSVtJDLcBTP86jjz+smedMMr1s4ejC0rg65l7IGDseZAR7AdtPcAun5fS5JSGHyKxMUYVBYpJ3Dpw7najBg9zHcB/IC1//oOcmRdNLNOrGK7IFTsrhKq7LzGj5Easy6wu8plumGPvE+5CJVQzMtrMJNJkBwCEaxF/MxfCEyP/YZzJDGhwCOuPEA+5Va0VQi4rIENrpFiELT3di6t88if0+xE=,iv:TeTAXTppPkhOMcF+h4t8Zbfz5pgAbEgvBLCiPVpp8PE=,tag:Br9vGQOu17Y2TNMqB+PdMQ==,type:str] + token: ENC[AES256_GCM,data:s9Ke8hrDLUfExiQKAGCFE6xL8jWBhqejiCB3iiTmQ8tZ70o2lZwA3vUpALiwLsH6gkHAWSu7FbM11kMChdLFY23yrQ4p03eLK5Oe7xZTvhwZiXMH9kpDt61hXEdA6kcrcR75OTC4wzHxlxq8rVuQ48PEZPUErWw1EgauLF/sMWnttQOgvQndBa/PDoVL0McKzWBydPMO2Duhwe5v1WO5h7JSHGSQyI/Ddf+5bejwNFbh02wdJYyAkzRq17ECSwS1J9vUCUHs7Ack1IiebFVf8CFb+e9FnnWsaT50quhKm1yMfyVUDH41+UtScxepmeX3zPjNMLfrlcNTiWj+h0bPr4ZGYK+TQqhyVtIf91G8Omv8HjfG3og90QiQ7qpU+VfKP72x/vN7ySAC3hWLBj9yZJCZtFSghuw2fmbPZPUTDZBzFpSPKAdw+LmWfz1EmpgqqQK88JdEuhvHCPk+8ERokFCICkL2oFqZfHR8I3pJATUTOV3Pi/Pfy40seG3ilofdYCR20EpDMgVUK9EuGemc7VAx0RtGRkbQUaOAsu2/PmFju1owrDK2JBv0ouLdhUluIr1oUSPcVo9Pq4EsOUd+9/7+1GEFyqwOMDO9eZ0IxAHQH82PcYMhoBAJLanZVjlU7Aqp3DQv/OO/iOiBLmPZ8eQjNCLgjKKf90gSRR2qiQpfC/9xX6aKuMROL2RODLAnVixhWbcC7uXrlTWWndzkhCzr6IMyT52DiehpDPJ6jRSl4KKrf6NbuvB0XuhfK+OgbN8/Kjyacx8y7ToQgSF/H7vbhNvok9Ib6FlZo6Pq6kXbjtqOsQ6eRbKcQ0k7kBf2PFj03gYRjCVg8t2Rv4HZtDyW,iv:E2+tWKberC+zPnllwResWdCL7xY7RcfpQhOvKazWxYw=,tag:edoa/jOkaqKiOGIMIr4bnw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBKWkdqL0VhNTNUUkgxNExL + Qk9rZThFZFNTQmZIc3VVZG5HNGgyMmhkQmpRClVkZDk1WkRCV0l4MUhTRVVoemJM + Q1ltdGllV0VPTzA2bTh4NTBobW5sQncKLS0tIG5rMmlQTTZ5cVkvQ2h3L0hCYURy + L3dJcnh3ZmxvKzlQUGtFKy9hc2U2MEkK15zbBy2Q/TKP6io1Ubuj8NIQBG0mawhG + edYrl4XIG94DFsUHiBkjW4Ef3XHjoFOIIqjCmbrKyNzHv4lEK+LoJg== + -----END AGE 
ENCRYPTED FILE----- + lastmodified: "2024-04-23T22:20:53Z" + mac: ENC[AES256_GCM,data:2OAPQUX5yEl4VnVyR2wn65Lq4nDqB57Di7FjSqojujqVU1fYtqBTLlVaNqbBRS6ZFuqkzPbUFBl2KWA9SAJsMjJ2zXkJQ9ch0pB/NCN/j/AlKvDAfhvnJtbKEEd8bWVwZ+AIOJ5ophSDp1ufMv8fanq9Wee+SOwxTrYxH4N/u6k=,iv:20yAJAIB8VYlO8YA818zWFTDBxbvsGcaeMADpufWm3o=,tag:au54g+6PEq1eYZxRCdDlSQ==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/kubernetes/apps/external-secrets/external-secrets/ks.yaml b/kubernetes/apps/external-secrets/external-secrets/ks.yaml new file mode 100644 index 00000000..8dd4a51b --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets + namespace: flux-system +spec: + targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/external-secrets/external-secrets/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets-stores + namespace: flux-system +spec: + targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets + path: ./kubernetes/apps/external-secrets/external-secrets/store + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/external-secrets/external-secrets/store/clustersecretstore.yaml b/kubernetes/apps/external-secrets/external-secrets/store/clustersecretstore.yaml new file mode 100644 index 00000000..18b8222c --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/store/clustersecretstore.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/clustersecretstore_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: onepassword-connect +spec: + provider: + onepassword: + connectHost: http://onepassword-connect.external-secrets.svc.cluster.local + vaults: + homelab: 1 + auth: + secretRef: + connectTokenSecretRef: + name: onepassword-connect-secret + key: token + namespace: external-secrets diff --git a/kubernetes/apps/external-secrets/external-secrets/store/helmrelease.yaml b/kubernetes/apps/external-secrets/external-secrets/store/helmrelease.yaml new file mode 100644 index 00000000..de507d18 --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/store/helmrelease.yaml @@ -0,0 +1,137 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: onepassword-connect +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + onepassword-connect: + strategy: 
RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + api: + image: + repository: docker.io/1password/connect-api + tag: 1.7.2@sha256:6aa94cf713f99c0fa58c12ffdd1b160404b4c13a7f501a73a791aa84b608c5a1 + env: + XDG_DATA_HOME: &configDir /config + OP_HTTP_PORT: &apiPort 80 + OP_BUS_PORT: 11220 + OP_BUS_PEERS: localhost:11221 + OP_SESSION: + valueFrom: + secretKeyRef: + name: onepassword-connect-secret + key: 1password-credentials.json + probes: + liveness: + enabled: true + custom: true + spec: + httpGet: + path: /heartbeat + port: *apiPort + initialDelaySeconds: 15 + periodSeconds: 30 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *apiPort + initialDelaySeconds: 15 + securityContext: &securityContext + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: &resources + requests: + cpu: 10m + limits: + memory: 256M + sync: + image: + repository: docker.io/1password/connect-sync + tag: 1.7.2@sha256:fe527ed9d81f193d8dfbba4140d61f9e8c8dceb0966b3009259087504e5ff79c + env: + XDG_DATA_HOME: *configDir + OP_HTTP_PORT: &syncPort 8081 + OP_BUS_PORT: 11221 + OP_BUS_PEERS: localhost:11220 + OP_SESSION: + valueFrom: + secretKeyRef: + name: onepassword-connect-secret + key: 1password-credentials.json + probes: + liveness: + enabled: true + custom: true + spec: + httpGet: + path: /heartbeat + port: *syncPort + initialDelaySeconds: 15 + periodSeconds: 30 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *syncPort + initialDelaySeconds: 15 + securityContext: *securityContext + resources: *resources + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: onepassword-connect + ports: + http: + port: *apiPort + ingress: + app: + className: internal + hosts: + - host: "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + type: emptyDir + globalMounts: + - path: *configDir diff --git a/kubernetes/apps/external-secrets/external-secrets/store/kustomization.yaml b/kubernetes/apps/external-secrets/external-secrets/store/kustomization.yaml new file mode 100644 index 00000000..65df29ef --- /dev/null +++ b/kubernetes/apps/external-secrets/external-secrets/store/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./clustersecretstore.yaml diff --git a/kubernetes/apps/external-secrets/kustomization.yaml b/kubernetes/apps/external-secrets/kustomization.yaml new file mode 100644 index 00000000..8b5a7e34 --- /dev/null +++ b/kubernetes/apps/external-secrets/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./external-secrets/ks.yaml diff --git a/kubernetes/apps/external-secrets/namespace.yaml b/kubernetes/apps/external-secrets/namespace.yaml new file mode 100644 index 00000000..26718c2a --- /dev/null +++ b/kubernetes/apps/external-secrets/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace 
+metadata: + name: external-secrets + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/flux-system/addons/app/kustomization.yaml b/kubernetes/apps/flux-system/addons/app/kustomization.yaml new file mode 100644 index 00000000..7b48edcd --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./monitoring + - ./webhooks diff --git a/kubernetes/apps/flux-system/addons/app/monitoring/kustomization.yaml b/kubernetes/apps/flux-system/addons/app/monitoring/kustomization.yaml new file mode 100644 index 00000000..247c0374 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/monitoring/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./podmonitor.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/apps/flux-system/addons/app/monitoring/podmonitor.yaml b/kubernetes/apps/flux-system/addons/app/monitoring/podmonitor.yaml new file mode 100644 index 00000000..8d09c127 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/monitoring/podmonitor.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/podmonitor_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: flux-system + namespace: flux-system + labels: + app.kubernetes.io/part-of: flux + app.kubernetes.io/component: monitoring +spec: + namespaceSelector: + matchNames: + - flux-system + selector: + matchExpressions: + - key: app + operator: In + values: + - helm-controller + - source-controller + - kustomize-controller + - notification-controller + - image-automation-controller + - image-reflector-controller + podMetricsEndpoints: + - port: http-prom + relabelings: + # Ref: https://github.com/prometheus-operator/prometheus-operator/issues/4816 + - sourceLabels: [__meta_kubernetes_pod_phase] + action: keep + regex: Running diff --git a/kubernetes/apps/flux-system/addons/app/monitoring/prometheusrule.yaml b/kubernetes/apps/flux-system/addons/app/monitoring/prometheusrule.yaml new file mode 100644 index 00000000..4257e56d --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/monitoring/prometheusrule.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: flux-rules + namespace: flux-system +spec: + groups: + - name: flux.rules + rules: + - alert: FluxComponentAbsent + annotations: + summary: Flux component has disappeared from Prometheus target discovery. + expr: | + absent(up{job=~".*flux-system.*"} == 1) + for: 15m + labels: + severity: critical + - alert: FluxReconciliationFailure + annotations: + summary: >- + {{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation + has been failing for more than 15 minutes. 
+ expr: | + max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + + + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) + by (namespace, name, kind)) * 2 == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/apps/flux-system/addons/app/webhooks/github/ingress.yaml b/kubernetes/apps/flux-system/addons/app/webhooks/github/ingress.yaml new file mode 100644 index 00000000..e20604f0 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/webhooks/github/ingress.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: flux-webhook + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" +spec: + ingressClassName: external + rules: + - host: "flux-webhook.${SECRET_DOMAIN}" + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 diff --git a/kubernetes/apps/flux-system/addons/app/webhooks/github/kustomization.yaml b/kubernetes/apps/flux-system/addons/app/webhooks/github/kustomization.yaml new file mode 100644 index 00000000..5461805c --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/webhooks/github/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./ingress.yaml + - ./receiver.yaml diff --git a/kubernetes/apps/flux-system/addons/app/webhooks/github/receiver.yaml b/kubernetes/apps/flux-system/addons/app/webhooks/github/receiver.yaml new file mode 100644 index 00000000..cca5931b --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/webhooks/github/receiver.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: github-receiver +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/kubernetes/apps/flux-system/addons/app/webhooks/github/secret.sops.yaml b/kubernetes/apps/flux-system/addons/app/webhooks/github/secret.sops.yaml new file mode 100644 index 00000000..520439cc --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/webhooks/github/secret.sops.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: github-webhook-token-secret +stringData: + token: ENC[AES256_GCM,data:cmOlej/6DM4ZPp4rwwJi/77Xde4z1cpN,iv:sVqWqBUqMh3hDSLlYwMqCM+53SmBdwelBObn2VfCz6A=,tag:7GRWcZhdAPuD+LMQEhn9lw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWVUxkY3ZIRVVuYzhPU0s5 + K3VkQy9hTytxM3lFallMLzJvYnRDMS9MWmhrCm5JU3FMd3U5aGk0NDlhcUc3SDhm + VUFLTnJDUTdQLzVQdjB5MElHRmdJdjAKLS0tIHVFT2o0MVZkNGlqTWdrU3B6YXJR + R3RUV2Q1OXBFZ3gvdHk4SWkwVEhxZFkKkKlAIGgJEI8Vfkxo2Syhe3LV1/wXJQKU + Vh/tZjp/L1gbj3GA4Yunx/wQS/9RNJU7CI5+OV57Lk+NKI7/pVS+DQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-02-17T21:45:25Z" + mac: 
ENC[AES256_GCM,data:WTPjGJeMPdQZaRrjPL2VQqNzKkDsMd0Sk7ChgwiE4QPoEx8RZnHlwUk7QqNeAN84y7JfQFXDefJAJq6A4vSRlpZAAa1Rc/8N4n+gwSmOXVKNsX5e0dBkVYDS89gHdip164+Nr6NwEO5f7FbcI0vxNf7Lz0Zot5OIJPSX+LPe8CE=,iv:BdQgo/ATSuCPkpcrAlXKWFv8qWo8Z946IA5vnP6FrRo=,tag:zkil/5Q3ni3lU4YZ3+Jwkw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/apps/flux-system/addons/app/webhooks/kustomization.yaml b/kubernetes/apps/flux-system/addons/app/webhooks/kustomization.yaml new file mode 100644 index 00000000..08c1780f --- /dev/null +++ b/kubernetes/apps/flux-system/addons/app/webhooks/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/apps/flux-system/addons/ks.yaml b/kubernetes/apps/flux-system/addons/ks.yaml new file mode 100644 index 00000000..f8f0746e --- /dev/null +++ b/kubernetes/apps/flux-system/addons/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flux-addons + namespace: flux-system +spec: + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/flux-system/addons/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/flux-system/alert.yaml b/kubernetes/apps/flux-system/alert.yaml new file mode 100644 index 00000000..0168beb6 --- /dev/null +++ b/kubernetes/apps/flux-system/alert.yaml @@ -0,0 +1,38 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: flux-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: flux-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: GitRepository + name: "*" + - kind: HelmRelease + name: "*" + - kind: HelmRepository + name: "*" + - kind: Kustomization + name: "*" + - kind: OCIRepository + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/flux-system/kustomization.yaml b/kubernetes/apps/flux-system/kustomization.yaml new file mode 100644 index 00000000..041136f0 --- /dev/null +++ b/kubernetes/apps/flux-system/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./addons/ks.yaml diff --git a/kubernetes/apps/flux-system/namespace.yaml b/kubernetes/apps/flux-system/namespace.yaml new file mode 100644 index 00000000..b48db452 --- /dev/null +++ b/kubernetes/apps/flux-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: 
Namespace +metadata: + name: flux-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kube-system/alert.yaml b/kubernetes/apps/kube-system/alert.yaml new file mode 100644 index 00000000..33a50f33 --- /dev/null +++ b/kubernetes/apps/kube-system/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: kube-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: kube-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/kube-system/cilium/app/helm-values.yaml b/kubernetes/apps/kube-system/cilium/app/helm-values.yaml new file mode 100644 index 00000000..944dc39e --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/helm-values.yaml @@ -0,0 +1,57 @@ +--- +autoDirectNodeRoutes: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: home-kubernetes +cni: + exclusive: false +containerRuntime: + integration: containerd +# NOTE: devices might need to be set if you have more than one active NIC on your hosts +# devices: eno+ eth+ +endpointRoutes: + enabled: true +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "10.69.0.0/16" +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + enabled: true +loadBalancer: + algorithm: maglev + mode: snat +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml new file mode 100644 index 00000000..b1e0bcab --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml @@ -0,0 +1,81 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cilium +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.15.5 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + valuesFrom: + - kind: ConfigMap + name: cilium-helm-values + values: + hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + 
relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + className: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: cilium.png + gethomepage.dev/name: Hubble + gethomepage.dev/group: Observability + gethomepage.dev/description: Network Monitoring Dashboard + hosts: ["hubble.${SECRET_DOMAIN}"] + operator: + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium diff --git a/kubernetes/apps/kube-system/cilium/app/kustomization.yaml b/kubernetes/apps/kube-system/cilium/app/kustomization.yaml new file mode 100644 index 00000000..25781ef1 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: cilium-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml b/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml new file mode 100644 index 00000000..58f92ba1 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml b/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml new file mode 100644 index 00000000..3cdeaa7d --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/config/cilium-l2.yaml @@ -0,0 +1,24 @@ +--- +# https://docs.cilium.io/en/latest/network/l2-announcements +apiVersion: cilium.io/v2alpha1 +kind: CiliumL2AnnouncementPolicy +metadata: + name: l2-policy +spec: + loadBalancerIPs: true + # NOTE: interfaces might need to be set if you have more than one active NIC on your hosts + # interfaces: + # - ^eno[0-9]+ + # - ^eth[0-9]+ + nodeSelector: + matchLabels: + kubernetes.io/os: linux +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: l2-pool +spec: + allowFirstLastIPs: "Yes" + blocks: + - cidr: "192.168.20.0/24" diff --git a/kubernetes/apps/kube-system/cilium/config/kustomization.yaml b/kubernetes/apps/kube-system/cilium/config/kustomization.yaml new file mode 100644 index 00000000..b0ecf0d1 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/config/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cilium-l2.yaml diff --git a/kubernetes/apps/kube-system/cilium/ks.yaml b/kubernetes/apps/kube-system/cilium/ks.yaml new file mode 100644 index 00000000..36194e8b --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + 
app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/cilium/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cilium + path: ./kubernetes/apps/kube-system/cilium/config + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/coredns/app/helm-values.yaml b/kubernetes/apps/kube-system/coredns/app/helm-values.yaml new file mode 100644 index 00000000..22da0298 --- /dev/null +++ b/kubernetes/apps/kube-system/coredns/app/helm-values.yaml @@ -0,0 +1,50 @@ +--- +fullnameOverride: coredns +k8sAppLabelOverride: kube-dns +serviceAccount: + create: true +service: + name: kube-dns + clusterIP: "10.96.0.10" +servers: + - zones: + - zone: . + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: errors + - name: health + configBlock: |- + lameduck 5s + - name: ready + - name: log + configBlock: |- + class error + - name: prometheus + parameters: 0.0.0.0:9153 + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists +tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule diff --git a/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml b/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml new file mode 100644 index 00000000..85fd31e3 --- /dev/null +++ b/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: coredns +spec: + interval: 30m + chart: + spec: + chart: coredns + version: 1.29.0 + sourceRef: + kind: HelmRepository + name: coredns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: coredns-helm-values diff --git a/kubernetes/apps/kube-system/coredns/app/kustomization.yaml b/kubernetes/apps/kube-system/coredns/app/kustomization.yaml new file mode 100644 index 00000000..39444bbd --- /dev/null +++ b/kubernetes/apps/kube-system/coredns/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: coredns-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git 
a/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml b/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml new file mode 100644 index 00000000..58f92ba1 --- /dev/null +++ b/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/apps/kube-system/coredns/ks.yaml b/kubernetes/apps/kube-system/coredns/ks.yaml new file mode 100644 index 00000000..766a6c07 --- /dev/null +++ b/kubernetes/apps/kube-system/coredns/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app coredns + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/coredns/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/intel-device-plugin/app/helmrelease.yaml b/kubernetes/apps/kube-system/intel-device-plugin/app/helmrelease.yaml new file mode 100644 index 00000000..359bca29 --- /dev/null +++ b/kubernetes/apps/kube-system/intel-device-plugin/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: intel-device-plugin-operator +spec: + interval: 30m + chart: + spec: + chart: intel-device-plugins-operator + version: 0.30.0 + sourceRef: + kind: HelmRepository + name: intel + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + retries: 3 + dependsOn: + - name: node-feature-discovery + namespace: kube-system diff --git a/kubernetes/apps/kube-system/intel-device-plugin/app/kustomization.yaml b/kubernetes/apps/kube-system/intel-device-plugin/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/kube-system/intel-device-plugin/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/intel-device-plugin/gpu/helmrelease.yaml b/kubernetes/apps/kube-system/intel-device-plugin/gpu/helmrelease.yaml new file mode 100644 index 00000000..66c53af3 --- /dev/null +++ b/kubernetes/apps/kube-system/intel-device-plugin/gpu/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: intel-device-plugin-gpu +spec: + interval: 30m + chart: + spec: + chart: intel-device-plugins-gpu + version: 0.30.0 + sourceRef: + kind: HelmRepository + name: intel + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + dependsOn: + - name: intel-device-plugin-operator + namespace: kube-system + values: + name: intel-gpu-plugin + sharedDevNum: 3 + nodeFeatureRule: false diff --git 
a/kubernetes/apps/kube-system/intel-device-plugin/gpu/kustomization.yaml b/kubernetes/apps/kube-system/intel-device-plugin/gpu/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/kube-system/intel-device-plugin/gpu/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/intel-device-plugin/ks.yaml b/kubernetes/apps/kube-system/intel-device-plugin/ks.yaml new file mode 100644 index 00000000..4371f0b5 --- /dev/null +++ b/kubernetes/apps/kube-system/intel-device-plugin/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app intel-device-plugin + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/intel-device-plugin/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app intel-device-plugin-gpu + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/intel-device-plugin/gpu + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml new file mode 100644 index 00000000..00ad772e --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml @@ -0,0 +1,3 @@ +--- +providerRegex: ^k8s-control-\d$ +bypassDnsResolution: true diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml new file mode 100644 index 00000000..2947713b --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.2.1 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + valuesFrom: + - kind: ConfigMap + name: kubelet-csr-approver-helm-values + values: + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml new file mode 100644 index 00000000..16074ce8 --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: 
$schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: kubelet-csr-approver-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml new file mode 100644 index 00000000..58f92ba1 --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml b/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml new file mode 100644 index 00000000..f43156a8 --- /dev/null +++ b/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kubelet-csr-approver + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kubelet-csr-approver/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/kustomization.yaml b/kubernetes/apps/kube-system/kustomization.yaml new file mode 100644 index 00000000..6c816831 --- /dev/null +++ b/kubernetes/apps/kube-system/kustomization.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./cilium/ks.yaml + - ./coredns/ks.yaml + - ./metrics-server/ks.yaml + - ./reloader/ks.yaml + - ./spegel/ks.yaml + - ./kubelet-csr-approver/ks.yaml + - ./node-feature-discovery/ks.yaml + - ./intel-device-plugin/ks.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml new file mode 100644 index 00000000..60b8fdf9 --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: metrics-server +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.12.1 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + args: + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: 
$schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/ks.yaml b/kubernetes/apps/kube-system/metrics-server/ks.yaml new file mode 100644 index 00000000..6a21d99c --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app metrics-server + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/namespace.yaml b/kubernetes/apps/kube-system/namespace.yaml new file mode 100644 index 00000000..5eeb2c91 --- /dev/null +++ b/kubernetes/apps/kube-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kube-system/node-feature-discovery/app/helmrelease.yaml b/kubernetes/apps/kube-system/node-feature-discovery/app/helmrelease.yaml new file mode 100644 index 00000000..ff177a02 --- /dev/null +++ b/kubernetes/apps/kube-system/node-feature-discovery/app/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: node-feature-discovery +spec: + interval: 30m + chart: + spec: + chart: node-feature-discovery + version: 0.15.4 + sourceRef: + kind: HelmRepository + name: node-feature-discovery + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + retries: 3 + values: + worker: + config: + core: + sources: ["custom", "pci"] + prometheus: + enable: true diff --git a/kubernetes/apps/kube-system/node-feature-discovery/app/kustomization.yaml b/kubernetes/apps/kube-system/node-feature-discovery/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/kube-system/node-feature-discovery/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/node-feature-discovery/ks.yaml b/kubernetes/apps/kube-system/node-feature-discovery/ks.yaml new file mode 100644 index 00000000..c4e859d1 --- /dev/null +++ b/kubernetes/apps/kube-system/node-feature-discovery/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app node-feature-discovery + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/node-feature-discovery/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m 
+ retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app node-feature-discovery-rules + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: node-feature-discovery + path: ./kubernetes/apps/kube-system/node-feature-discovery/rules + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/node-feature-discovery/rules/intel-gpu-device.yaml b/kubernetes/apps/kube-system/node-feature-discovery/rules/intel-gpu-device.yaml new file mode 100644 index 00000000..865b9548 --- /dev/null +++ b/kubernetes/apps/kube-system/node-feature-discovery/rules/intel-gpu-device.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/nfd.k8s-sigs.io/nodefeaturerule_v1alpha1.json +apiVersion: nfd.k8s-sigs.io/v1alpha1 +kind: NodeFeatureRule +metadata: + name: intel-gpu-device +spec: + rules: + - # Intel UHD Graphics 630 + name: intel.gpu + labels: + intel.feature.node.kubernetes.io/gpu: "true" + matchFeatures: + - feature: pci.device + matchExpressions: + class: { op: In, value: ["0300"] } + vendor: { op: In, value: ["8086"] } diff --git a/kubernetes/apps/kube-system/node-feature-discovery/rules/kustomization.yaml b/kubernetes/apps/kube-system/node-feature-discovery/rules/kustomization.yaml new file mode 100644 index 00000000..2f8dd39c --- /dev/null +++ b/kubernetes/apps/kube-system/node-feature-discovery/rules/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./intel-gpu-device.yaml diff --git a/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml new file mode 100644 index 00000000..e04152b0 --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: reloader +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.0.97 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: reloader + reloader: + readOnlyRootFileSystem: true + podMonitor: + enabled: true + namespace: "{{ .Release.Namespace }}" diff --git a/kubernetes/apps/kube-system/reloader/app/kustomization.yaml b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/reloader/ks.yaml b/kubernetes/apps/kube-system/reloader/ks.yaml new file mode 100644 index 00000000..0aae5261 --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/ks.yaml @@ -0,0 +1,21 @@ +--- +# 
yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reloader + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kube-system/spegel/app/helm-values.yaml b/kubernetes/apps/kube-system/spegel/app/helm-values.yaml new file mode 100644 index 00000000..a4185ae3 --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/app/helm-values.yaml @@ -0,0 +1,7 @@ +--- +spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts +service: + registry: + hostPort: 29999 diff --git a/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml b/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml new file mode 100644 index 00000000..ae67dc7b --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: spegel +spec: + interval: 30m + chart: + spec: + chart: spegel + version: v0.0.22 + sourceRef: + kind: HelmRepository + name: spegel + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: spegel-helm-values + values: + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/kube-system/spegel/app/kustomization.yaml b/kubernetes/apps/kube-system/spegel/app/kustomization.yaml new file mode 100644 index 00000000..8c7c0551 --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: spegel-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml b/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml new file mode 100644 index 00000000..58f92ba1 --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/apps/kube-system/spegel/ks.yaml b/kubernetes/apps/kube-system/spegel/ks.yaml new file mode 100644 index 00000000..8f129bd6 --- /dev/null +++ b/kubernetes/apps/kube-system/spegel/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app spegel + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/spegel/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git 
a/kubernetes/apps/network/alert.yaml b/kubernetes/apps/network/alert.yaml new file mode 100644 index 00000000..c5edd9d4 --- /dev/null +++ b/kubernetes/apps/network/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: network +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: network +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/network/cloudflared/app/configs/config.yaml b/kubernetes/apps/network/cloudflared/app/configs/config.yaml new file mode 100644 index 00000000..05bcef5c --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/configs/config.yaml @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: "external.${SECRET_DOMAIN}" + +ingress: + - hostname: "${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml b/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml new file mode 100644 index 00000000..d4252103 --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/externaldns.k8s.io/dnsendpoint_v1alpha1.json +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: cloudflared +spec: + endpoints: + - dnsName: "external.${SECRET_DOMAIN}" + recordType: CNAME + targets: ["${SECRET_CLOUDFLARE_TUNNEL_ID}.cfargotunnel.com"] diff --git a/kubernetes/apps/network/cloudflared/app/externalsecret.yaml b/kubernetes/apps/network/cloudflared/app/externalsecret.yaml new file mode 100644 index 00000000..925ea7d1 --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/externalsecret.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudflared-tunnel +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + refreshInterval: 15m + target: + name: cloudflared-tunnel-secret + template: + engineVersion: v2 + data: + TUNNEL_ID: "{{ .CLUSTER_CLOUDFLARE_TUNNEL_ID }}" + credentials.json: | + { + "AccountTag": "{{ .CLOUDFLARE_ACCOUNT_TAG }}", + "TunnelSecret": "{{ .CLOUDFLARE_TUNNEL_SECRET }}", + "TunnelID": "{{ .CLUSTER_CLOUDFLARE_TUNNEL_ID }}" + } + dataFrom: + - extract: + key: cloudflare diff --git a/kubernetes/apps/network/cloudflared/app/helmrelease.yaml b/kubernetes/apps/network/cloudflared/app/helmrelease.yaml new file mode 100644 index 00000000..bec06fe6 --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/helmrelease.yaml @@ 
-0,0 +1,116 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: ingress-nginx-external + namespace: network + values: + controllers: + cloudflared: + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.5.0@sha256:5d5f70a59d5e124d4a1a747769e0d27431861877860ca31deaad41b09726ca71 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "$(TUNNEL_ID)" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-tunnel-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/kubernetes/apps/network/cloudflared/app/kustomization.yaml b/kubernetes/apps/network/cloudflared/app/kustomization.yaml new file mode 100644 index 00000000..dae6e9ed --- /dev/null +++ b/kubernetes/apps/network/cloudflared/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dnsendpoint.yaml + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflared-configmap + files: + - ./configs/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/network/cloudflared/ks.yaml b/kubernetes/apps/network/cloudflared/ks.yaml new file mode 100644 index 00000000..e3d26dc3 --- /dev/null +++ b/kubernetes/apps/network/cloudflared/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app 
cloudflared + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-dns-cloudflare + - name: external-secrets-stores + path: ./kubernetes/apps/network/cloudflared/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/echo-server/app/helmrelease.yaml b/kubernetes/apps/network/echo-server/app/helmrelease.yaml new file mode 100644 index 00000000..bfa70629 --- /dev/null +++ b/kubernetes/apps/network/echo-server/app/helmrelease.yaml @@ -0,0 +1,92 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: echo-server +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + controllers: + echo-server: + strategy: RollingUpdate + containers: + app: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: 33 + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: true + LOG_IGNORE_PATH: /healthz + PROMETHEUS_ENABLED: true + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 64Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: echo-server + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: echo-server + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http diff --git a/kubernetes/apps/network/echo-server/app/kustomization.yaml b/kubernetes/apps/network/echo-server/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/network/echo-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/network/echo-server/ks.yaml b/kubernetes/apps/network/echo-server/ks.yaml new file mode 100644 index 00000000..0cfc7559 --- /dev/null +++ b/kubernetes/apps/network/echo-server/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app echo-server + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/echo-server/app + prune: true + sourceRef: + kind: GitRepository + name: 
home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/external-dns/app/externalsecret.yaml b/kubernetes/apps/network/external-dns/app/externalsecret.yaml new file mode 100644 index 00000000..7dd4493d --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: external-dns-cloudflare +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: external-dns-cloudflare-secret + template: + engineVersion: v2 + data: + CF_API_TOKEN: "{{ .CF_API_TOKEN }}" + dataFrom: + - extract: + key: cloudflare diff --git a/kubernetes/apps/network/external-dns/app/helmrelease.yaml b/kubernetes/apps/network/external-dns/app/helmrelease.yaml new file mode 100644 index 00000000..65919d35 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/helmrelease.yaml @@ -0,0 +1,50 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.14.4 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: *app + provider: cloudflare + env: + - name: &name CF_API_TOKEN + valueFrom: + secretKeyRef: + name: &secret external-dns-cloudflare-secret + key: *name + extraArgs: + - --ingress-class=external + - --cloudflare-proxied + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --dry-run + policy: sync + sources: ["crd", "ingress"] + txtPrefix: k8s. 
+ txtOwnerId: default + domainFilters: ["${SECRET_DOMAIN}"] + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: *secret diff --git a/kubernetes/apps/network/external-dns/app/kustomization.yaml b/kubernetes/apps/network/external-dns/app/kustomization.yaml new file mode 100644 index 00000000..4eed917b --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/network/external-dns/ks.yaml b/kubernetes/apps/network/external-dns/ks.yaml new file mode 100644 index 00000000..9c6e788b --- /dev/null +++ b/kubernetes/apps/network/external-dns/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns-cloudflare + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/network/external-dns/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/external-services/ks.yaml b/kubernetes/apps/network/external-services/ks.yaml new file mode 100644 index 00000000..fdb0cc58 --- /dev/null +++ b/kubernetes/apps/network/external-services/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-services + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/external-services/services + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/external-services/services/kustomization.yaml b/kubernetes/apps/network/external-services/services/kustomization.yaml new file mode 100644 index 00000000..2834df48 --- /dev/null +++ b/kubernetes/apps/network/external-services/services/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./synology.yaml + - ./sprut.yaml + - ./proxmox.yaml + - ./minio.yaml + - ./pihole.yaml diff --git a/kubernetes/apps/network/external-services/services/minio.yaml b/kubernetes/apps/network/external-services/services/minio.yaml new file mode 100644 index 00000000..0906c64c --- /dev/null +++ b/kubernetes/apps/network/external-services/services/minio.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-external-service +spec: + type: ExternalName + externalName: "${NAS_URL}" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: minio-ingress + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: "minio.png" + gethomepage.dev/name: Minio + gethomepage.dev/group: Storage + gethomepage.dev/description: S3 compatible object storage +spec: + rules: + 
- host: &host "minio.${SECRET_DOMAIN}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: minio-external-service + port: + number: 9090 + tls: + - hosts: + - *host diff --git a/kubernetes/apps/network/external-services/services/pihole.yaml b/kubernetes/apps/network/external-services/services/pihole.yaml new file mode 100644 index 00000000..ecd27fb0 --- /dev/null +++ b/kubernetes/apps/network/external-services/services/pihole.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: pihole-external-service +spec: + type: ExternalName + externalName: "${RPI_URL}" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: pihole-ingress + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: pi-hole.png + gethomepage.dev/name: PiHole + gethomepage.dev/group: Network + gethomepage.dev/description: Network-wide Ad Blocking DNS + gethomepage.dev/widget.type: pihole + gethomepage.dev/widget.url: "http://${RPI_URL}" + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_PI_HOLE_TOKEN}}" +spec: + rules: + - host: "pihole.${SECRET_DOMAIN}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: pihole-external-service + port: + number: 80 diff --git a/kubernetes/apps/network/external-services/services/proxmox.yaml b/kubernetes/apps/network/external-services/services/proxmox.yaml new file mode 100644 index 00000000..97c8af34 --- /dev/null +++ b/kubernetes/apps/network/external-services/services/proxmox.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: proxmox-external-service +spec: + type: ExternalName + externalName: 192.168.0.41 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: proxmox-ingress + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: proxmox.png + gethomepage.dev/name: Proxmox + gethomepage.dev/group: hardware + gethomepage.dev/description: Virtual Environment + gethomepage.dev/widget.type: proxmox + gethomepage.dev/widget.url: https://192.168.0.41:8006 + gethomepage.dev/widget.username: "{{HOMEPAGE_VAR_PROXMOX_USERNAME}}" + gethomepage.dev/widget.password: "{{HOMEPAGE_VAR_PROXMOX_PASSWORD}}" + gethomepage.dev/node: proxmox1 + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + rules: + - host: "proxmox.${SECRET_DOMAIN}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: proxmox-external-service + port: + number: 8006 diff --git a/kubernetes/apps/network/external-services/services/sprut.yaml b/kubernetes/apps/network/external-services/services/sprut.yaml new file mode 100644 index 00000000..ae22e707 --- /dev/null +++ b/kubernetes/apps/network/external-services/services/sprut.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: sprut-external-service +spec: + type: ExternalName + externalName: "${RPI_URL}" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: spruthub-ingress + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: "https://sprut.${SECRET_DOMAIN}/favicon.ico" + gethomepage.dev/name: Spruthub + gethomepage.dev/group: Home + gethomepage.dev/description: Zigbee hub and Homekit integration +spec: + rules: + - host: "sprut.${SECRET_DOMAIN}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: sprut-external-service + port: + number: 7777 diff --git a/kubernetes/apps/network/external-services/services/synology.yaml b/kubernetes/apps/network/external-services/services/synology.yaml new file mode 
100644 index 00000000..5141e2f1 --- /dev/null +++ b/kubernetes/apps/network/external-services/services/synology.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: synology-external-service +spec: + type: ExternalName + externalName: "${NAS_URL}" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: synology-ingress + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: synology.png + gethomepage.dev/name: Synology + gethomepage.dev/group: Storage + gethomepage.dev/description: Synology nas disk station + gethomepage.dev/widget.type: diskstation + gethomepage.dev/widget.url: "http://${NAS_URL}:5000" + gethomepage.dev/widget.username: "{{HOMEPAGE_VAR_SYNOLOGY_USERNAME}}" + gethomepage.dev/widget.password: "{{HOMEPAGE_VAR_SYNOLOGY_PASSWORD}}" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + rules: + - host: "nas.${SECRET_DOMAIN}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: synology-external-service + port: + number: 5001 diff --git a/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml new file mode 100644 index 00000000..1323aabb --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./staging.yaml + - ./production.yaml diff --git a/kubernetes/apps/network/ingress-nginx/certificates/production.yaml b/kubernetes/apps/network/ingress-nginx/certificates/production.yaml new file mode 100644 index 00000000..b5afdf41 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/certificates/production.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-production" +spec: + secretName: "${SECRET_DOMAIN/./-}-production-tls" + issuerRef: + name: letsencrypt-production + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml b/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml new file mode 100644 index 00000000..9c869425 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-staging" +spec: + secretName: "${SECRET_DOMAIN/./-}-staging-tls" + issuerRef: + name: letsencrypt-staging + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml new file mode 100644 index 00000000..145a356d --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml @@ -0,0 +1,73 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: ingress-nginx-external +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.10.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + 
retries: 3 + values: + fullnameOverride: ingress-nginx-external + controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "external.${SECRET_DOMAIN}" + io.cilium/lb-ipam-ips: "192.168.20.63" + externalTrafficPolicy: Cluster + ingressClassResource: + name: external + default: false + controllerValue: k8s.io/external + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["external"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + resources: + requests: + cpu: 100m + limits: + memory: 500Mi diff --git a/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml new file mode 100644 index 00000000..c98c62de --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml @@ -0,0 +1,73 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: ingress-nginx-internal + namespace: network +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.10.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: ingress-nginx-internal + controller: + service: + annotations: + io.cilium/lb-ipam-ips: "192.168.20.61" + externalTrafficPolicy: Cluster + ingressClassResource: + name: internal + default: true + controllerValue: k8s.io/internal + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["internal"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": 
"$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + resources: + requests: + cpu: 100m + limits: + memory: 500Mi diff --git a/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/network/ingress-nginx/ks.yaml b/kubernetes/apps/network/ingress-nginx/ks.yaml new file mode 100644 index 00000000..4121eab5 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/ks.yaml @@ -0,0 +1,69 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-certificates + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager-issuers + path: ./kubernetes/apps/network/ingress-nginx/certificates + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-internal + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/internal + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-external + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/external + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml b/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml new file mode 100644 index 00000000..95f41bf7 --- /dev/null +++ 
b/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: k8s-gateway +spec: + interval: 30m + chart: + spec: + chart: k8s-gateway + version: 2.4.0 + sourceRef: + kind: HelmRepository + name: k8s-gateway + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: k8s-gateway + domain: "${SECRET_DOMAIN}" + ttl: 1 + service: + type: LoadBalancer + port: 53 + annotations: + io.cilium/lb-ipam-ips: "192.168.20.62" + externalTrafficPolicy: Cluster + watchedResources: ["Ingress", "Service"] diff --git a/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml b/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/network/k8s-gateway/ks.yaml b/kubernetes/apps/network/k8s-gateway/ks.yaml new file mode 100644 index 00000000..6709e768 --- /dev/null +++ b/kubernetes/apps/network/k8s-gateway/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app k8s-gateway + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/k8s-gateway/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/network/kustomization.yaml b/kubernetes/apps/network/kustomization.yaml new file mode 100644 index 00000000..d527fd1a --- /dev/null +++ b/kubernetes/apps/network/kustomization.yaml @@ -0,0 +1,13 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./cloudflared/ks.yaml + - ./echo-server/ks.yaml + - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml + - ./k8s-gateway/ks.yaml + - ./external-services/ks.yaml diff --git a/kubernetes/apps/network/namespace.yaml b/kubernetes/apps/network/namespace.yaml new file mode 100644 index 00000000..4d78d7b1 --- /dev/null +++ b/kubernetes/apps/network/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/observability/alert.yaml b/kubernetes/apps/observability/alert.yaml new file mode 100644 index 00000000..df840dff --- /dev/null +++ b/kubernetes/apps/observability/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: observability +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# 
yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: observability +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/observability/gatus/app/externalsecret.yaml b/kubernetes/apps/observability/gatus/app/externalsecret.yaml new file mode 100644 index 00000000..f9eca61d --- /dev/null +++ b/kubernetes/apps/observability/gatus/app/externalsecret.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: gatus +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: gatus-secret + template: + engineVersion: v2 + data: + INIT_POSTGRES_DBNAME: gatus + INIT_POSTGRES_HOST: postgres-cluster-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .GATUS_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .GATUS_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_USER: "{{ .POSTGRES_SUPER_USER }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + DISCORD_WEBHOOK: "{{ .GATUS_DISCORD_WEBHOOK }}" + dataFrom: + - extract: + key: cloudnative-pg + - extract: + key: gatus + - extract: + key: discord diff --git a/kubernetes/apps/observability/gatus/app/helmrelease.yaml b/kubernetes/apps/observability/gatus/app/helmrelease.yaml new file mode 100644 index 00000000..c11f00cc --- /dev/null +++ b/kubernetes/apps/observability/gatus/app/helmrelease.yaml @@ -0,0 +1,143 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gatus +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + gatus: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16 + envFrom: &envFrom + - secretRef: + name: gatus-secret + init-config: + dependsOn: init-db + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + tag: 1.27.1@sha256:df71eab1466c67b84e46fa9cd2d84738372377971d44dbb8699ab4483278c839 + env: + FOLDER: /config + LABEL: gatus.io/enabled + NAMESPACE: ALL + RESOURCE: both + UNIQUE_FILENAMES: true + METHOD: WATCH + restartPolicy: Always + resources: &resources + requests: + cpu: 10m + limits: + memory: 256Mi + containers: + app: + image: + repository: ghcr.io/twin/gatus + tag: v5.10.0@sha256:658a9cb993ff0b16832947dab8de885b2e2a66037330b839310fa3f39d5c00f4 + env: + TZ: "${TIMEZONE}" + GATUS_CONFIG_PATH: /config + GATUS_DELAY_START_SECONDS: 5 + SECRET_DOMAIN: "${SECRET_DOMAIN}" + CUSTOM_WEB_PORT: &port 80 + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + 
securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: [ "ALL" ] } + resources: *resources + defaultPodOptions: + dnsConfig: + options: + - { name: ndots, value: "1" } + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + fsGroup: 65534 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: gatus + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: gatus + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + className: external + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/group: Observability + gethomepage.dev/name: Gatus + gethomepage.dev/icon: gatus.png + gethomepage.dev/description: Status page + gethomepage.dev/widget.type: gatus + gethomepage.dev/widget.url: http://gatus.observability.svc.cluster.local + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: "status.${SECRET_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + serviceAccount: + create: true + name: gatus + persistence: + config: + type: emptyDir + config-file: + type: configMap + name: gatus-configmap + globalMounts: + - path: /config/config.yaml + subPath: config.yaml + readOnly: true diff --git a/kubernetes/apps/observability/gatus/app/kustomization.yaml b/kubernetes/apps/observability/gatus/app/kustomization.yaml new file mode 100644 index 00000000..30bf43b9 --- /dev/null +++ b/kubernetes/apps/observability/gatus/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./rbac.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: gatus-configmap + files: + - config.yaml=./resources/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/observability/gatus/app/rbac.yaml b/kubernetes/apps/observability/gatus/app/rbac.yaml new file mode 100644 index 00000000..0f12c439 --- /dev/null +++ b/kubernetes/apps/observability/gatus/app/rbac.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gatus +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: gatus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gatus +subjects: + - kind: ServiceAccount + name: gatus + namespace: observability diff --git a/kubernetes/apps/observability/gatus/app/resources/config.yaml b/kubernetes/apps/observability/gatus/app/resources/config.yaml new file mode 100644 index 00000000..7e950b0a --- /dev/null +++ b/kubernetes/apps/observability/gatus/app/resources/config.yaml @@ -0,0 +1,46 @@ +--- +# Note: Gatus vars should be escaped with $${VAR_NAME} to avoid interpolation by Flux +web: + port: $${CUSTOM_WEB_PORT} +storage: + type: postgres + path: postgres://$${INIT_POSTGRES_USER}:$${INIT_POSTGRES_PASS}@$${INIT_POSTGRES_HOST}:5432/$${INIT_POSTGRES_DBNAME}?sslmode=disable + caching: true +metrics: true +debug: false +ui: + title: Status | Gatus + header: Status +alerting: + discord: + webhook-url: $${DISCORD_WEBHOOK} + default-alert: + description: health-check failed + send-on-resolved: true + failure-threshold: 5 + success-threshold: 2 
+connectivity: + checker: + target: 1.1.1.1:53 + interval: 1m +endpoints: + - name: status + group: external + url: https://status.$${SECRET_DOMAIN} + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: discord + - name: flux-webhook + group: external + url: https://flux-webhook.$${SECRET_DOMAIN} + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 404" + alerts: + - type: discord diff --git a/kubernetes/apps/observability/gatus/ks.yaml b/kubernetes/apps/observability/gatus/ks.yaml new file mode 100644 index 00000000..fc200af3 --- /dev/null +++ b/kubernetes/apps/observability/gatus/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gatus + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg + path: ./kubernetes/apps/observability/gatus/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/observability/grafana/app/externalsecret.yaml b/kubernetes/apps/observability/grafana/app/externalsecret.yaml new file mode 100644 index 00000000..222548e1 --- /dev/null +++ b/kubernetes/apps/observability/grafana/app/externalsecret.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: grafana +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: grafana-secret + template: + engineVersion: v2 + data: + GF_DATABASE_NAME: &dbName grafana + GF_DATABASE_HOST: postgres-cluster-rw.database.svc.cluster.local + GF_DATABASE_USER: &dbUser "{{ .GRAFANA_POSTGRES_USER }}" + GF_DATABASE_PASSWORD: &dbPass "{{ .GRAFANA_POSTGRES_PASS }}" + GF_DATABASE_SSL_MODE: disable + GF_DATABASE_TYPE: postgres + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: postgres-cluster-rw.database.svc.cluster.local + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_USER: "{{ .POSTGRES_SUPER_USER }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + ADMIN_USER_PASS: "{{ .password }}" + ADMIN_USER_NAME: "{{ .username }}" + dataFrom: + - extract: + key: grafana + - extract: + key: cloudnative-pg diff --git a/kubernetes/apps/observability/grafana/app/helmrelease.yaml b/kubernetes/apps/observability/grafana/app/helmrelease.yaml new file mode 100644 index 00000000..4f2dcfc8 --- /dev/null +++ b/kubernetes/apps/observability/grafana/app/helmrelease.yaml @@ -0,0 +1,355 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: grafana +spec: + interval: 30m + chart: + spec: + chart: grafana + version: 7.3.11 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: kube-prometheus-stack + namespace: observability + - name: loki + namespace: observability + values: + 
extraInitContainers: + - name: 01-init-db + image: ghcr.io/onedr0p/postgres-init:16 + envFrom: + - secretRef: + name: &secret grafana-secret + replicas: 3 + env: + GF_DATE_FORMATS_USE_BROWSER_LOCALE: true + GF_EXPLORE_ENABLED: true + GF_FEATURE_TOGGLES_ENABLE: publicDashboards + GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: natel-discrete-panel,pr0ps-trackmap-panel,panodata-map-panel + GF_SECURITY_ANGULAR_SUPPORT_ENABLED: true + GF_SECURITY_COOKIE_SAMESITE: grafana + GF_SERVER_ROOT_URL: "https://grafana.${SECRET_DOMAIN}" + envFromSecrets: + - name: *secret + grafana.ini: + analytics: + check_for_updates: false + check_for_plugin_updates: false + reporting_enabled: false + auth.basic: + enabled: true + auth.anonymous: + enabled: true + org_role: Viewer + news: + news_feed_enabled: false + admin: + existingSecret: grafana-secret + passwordKey: ADMIN_USER_PASS + userKey: ADMIN_USER_NAME + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: default + orgId: 1 + folder: "" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default-folder + - name: flux + orgId: 1 + folder: Flux + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/flux-folder + - name: kubernetes + orgId: 1 + folder: Kubernetes + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/kubernetes-folder + - name: nginx + orgId: 1 + folder: Nginx + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/nginx-folder + - name: prometheus + orgId: 1 + folder: Prometheus + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/prometheus-folder + - name: thanos + orgId: 1 + folder: Thanos + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/thanos-folder + - name: unifi + orgId: 1 + folder: Unifi + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/unifi-folder + datasources: + datasources.yaml: + apiVersion: 1 + deleteDatasources: + - { name: Alertmanager, orgId: 1 } + - { name: Loki, orgId: 1 } + - { name: Prometheus, orgId: 1 } + datasources: + - name: Prometheus + type: prometheus + uid: prometheus + access: proxy + url: http://thanos-query-frontend.observability.svc.cluster.local:10902 + jsonData: + prometheusType: Thanos + timeInterval: 1m + - name: Loki + type: loki + uid: loki + access: proxy + url: http://loki-gateway.observability.svc.cluster.local + jsonData: + maxLines: 250 + - name: Alertmanager + type: alertmanager + uid: alertmanager + access: proxy + url: http://alertmanager-operated.observability.svc.cluster.local:9093 + jsonData: + implementation: prometheus + dashboards: + default: + cloudflared: + # renovate: depName="Cloudflare Tunnels (cloudflared)" + gnetId: 17457 + revision: 6 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + external-dns: + # renovate: depName="External-dns" + gnetId: 15038 + revision: 3 + datasource: Prometheus + minio: + # renovate: depName="MinIO Dashboard" + gnetId: 13502 + revision: 26 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + node-exporter-full: + # renovate: depName="Node Exporter Full" + gnetId: 1860 + revision: 36 + datasource: Prometheus + spegel: + # renovate: depName="Spegel" + gnetId: 18089 + revision: 1 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + unpackerr: + # renovate: 
depName="Unpackerr" + gnetId: 18817 + revision: 1 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + zfs: + # renovate: depName="ZFS" + gnetId: 7845 + revision: 4 + datasource: Prometheus + cert-manager: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json + datasource: Prometheus + dragonfly: + url: https://raw.githubusercontent.com/dragonflydb/dragonfly/main/tools/local/monitoring/grafana/provisioning/dashboards/dashboard.json + datasource: Prometheus + node-feature-discovery: + url: https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/master/examples/grafana-dashboard.json + datasource: Prometheus + flux: + flux-cluster: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/cluster.json + datasource: Prometheus + flux-control-plane: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/control-plane.json + datasource: Prometheus + kubernetes: + kubernetes-api-server: + # renovate: depName="Kubernetes / System / API Server" + gnetId: 15761 + revision: 16 + datasource: Prometheus + kubernetes-coredns: + # renovate: depName="Kubernetes / System / CoreDNS" + gnetId: 15762 + revision: 17 + datasource: Prometheus + kubernetes-global: + # renovate: depName="Kubernetes / Views / Global" + gnetId: 15757 + revision: 37 + datasource: Prometheus + kubernetes-namespaces: + # renovate: depName="Kubernetes / Views / Namespaces" + gnetId: 15758 + revision: 34 + datasource: Prometheus + kubernetes-nodes: + # renovate: depName="Kubernetes / Views / Nodes" + gnetId: 15759 + revision: 29 + datasource: Prometheus + kubernetes-pods: + # renovate: depName="Kubernetes / Views / Pods" + gNetId: 15760 + revision: 21 + datasource: Prometheus + kubernetes-volumes: + # renovate: depName="K8s / Storage / Volumes / Cluster" + gnetId: 11454 + revision: 14 + datasource: Prometheus + nginx: + nginx: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json + datasource: Prometheus + nginx-request-handling-performance: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json + datasource: Prometheus + prometheus: + prometheus: + # renovate: depName="Prometheus" + gnetId: 19105 + revision: 3 + datasource: Prometheus + thanos: + thanos-bucket-replicate: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/bucket-replicate.json + datasource: Prometheus + thanos-compact: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/compact.json + datasource: Prometheus + thanos-overview: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/overview.json + datasource: Prometheus + thanos-query: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/query.json + datasource: Prometheus + thanos-query-frontend: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/query-frontend.json + datasource: Prometheus + thanos-receieve: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/receive.json + datasource: Prometheus + thanos-rule: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/rule.json 
+ datasource: Prometheus + thanos-sidecar: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/sidecar.json + datasource: Prometheus + thanos-store: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/store.json + datasource: Prometheus + unifi: + unifi-insights: + # renovate: depName="UniFi-Poller: Client Insights - Prometheus" + gnetId: 11315 + revision: 9 + datasource: Prometheus + unifi-network-sites: + # renovate: depName="UniFi-Poller: Network Sites - Prometheus" + gnetId: 11311 + revision: 5 + datasource: Prometheus + unifi-uap: + # renovate: depName="UniFi-Poller: UAP Insights - Prometheus" + gnetId: 11314 + revision: 10 + datasource: Prometheus + unifi-usw: + # renovate: depName="UniFi-Poller: USW Insights - Prometheus" + gnetId: 11312 + revision: 9 + datasource: Prometheus + sidecar: + dashboards: + enabled: true + searchNamespace: ALL + label: grafana_dashboard + folderAnnotation: grafana_folder + provider: + disableDelete: true + foldersFromFilesStructure: true + datasources: + enabled: true + searchNamespace: ALL + labelValue: "" + plugins: + - grafana-clock-panel + - grafana-piechart-panel + - grafana-worldmap-panel + - natel-discrete-panel + - pr0ps-trackmap-panel + - vonage-status-panel + serviceMonitor: + enabled: true + ingress: + enabled: true + ingressClassName: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: grafana.png + gethomepage.dev/name: Grafana + gethomepage.dev/group: Observability + gethomepage.dev/description: Visual analytics & monitoring platform + gethomepage.dev/widget.type: grafana + gethomepage.dev/widget.url: http://grafana.observability.svc.cluster.local + gethomepage.dev/widget.username: "{{`{{HOMEPAGE_VAR_GRAFANA_USER}}`}}" + gethomepage.dev/widget.password: "{{`{{HOMEPAGE_VAR_GRAFANA_PASSWORD}}`}}" + hosts: ["grafana.${SECRET_DOMAIN}"] + persistence: + enabled: false + testFramework: + enabled: false + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: grafana diff --git a/kubernetes/apps/observability/grafana/app/kustomization.yaml b/kubernetes/apps/observability/grafana/app/kustomization.yaml new file mode 100644 index 00000000..4eed917b --- /dev/null +++ b/kubernetes/apps/observability/grafana/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/observability/grafana/ks.yaml b/kubernetes/apps/observability/grafana/ks.yaml new file mode 100644 index 00000000..5e74d286 --- /dev/null +++ b/kubernetes/apps/observability/grafana/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app grafana + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + path: ./kubernetes/apps/observability/grafana/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git 
a/kubernetes/apps/observability/kube-prometheus-stack/app/externalsecret.yaml b/kubernetes/apps/observability/kube-prometheus-stack/app/externalsecret.yaml new file mode 100644 index 00000000..079f6dce --- /dev/null +++ b/kubernetes/apps/observability/kube-prometheus-stack/app/externalsecret.yaml @@ -0,0 +1,48 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: alertmanager +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: alertmanager-secret + template: + engineVersion: v2 + data: + alertmanager.yaml: | + global: + resolve_timeout: 5m + route: + group_by: ["alertname", "job"] + group_interval: 10m + group_wait: 1m + receiver: discord + repeat_interval: 12h + routes: + - receiver: "null" + matchers: + - alertname =~ "InfoInhibitor" + - receiver: discord + continue: true + matchers: + - severity = "critical" + inhibit_rules: + - equal: ["alertname", "namespace"] + source_matchers: + - severity = "critical" + target_matchers: + - severity = "warning" + receivers: + - name: "null" + - name: discord + discord_configs: + - webhook_url: "{{ .ALERTMANAGER_DISCORD_WEBHOOK }}" + send_resolved: true + dataFrom: + - extract: + key: discord diff --git a/kubernetes/apps/observability/kube-prometheus-stack/app/helmrelease.yaml b/kubernetes/apps/observability/kube-prometheus-stack/app/helmrelease.yaml new file mode 100644 index 00000000..5793e378 --- /dev/null +++ b/kubernetes/apps/observability/kube-prometheus-stack/app/helmrelease.yaml @@ -0,0 +1,188 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kube-prometheus-stack +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: kube-prometheus-stack + version: 58.6.0 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: local-path-provisioner + namespace: storage + - name: thanos + namespace: observability + values: + crds: + enabled: true + cleanPrometheusOperatorObjectNames: true + alertmanager: + ingress: + enabled: true + pathType: Prefix + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: alertmanager.svg + gethomepage.dev/name: Alertmanager + gethomepage.dev/group: Observability + gethomepage.dev/description: Manage prometheus alerts + ingressClassName: internal + hosts: [ "alertmanager.${SECRET_DOMAIN}" ] + alertmanagerSpec: + replicas: 2 + useExistingSecret: true + configSecret: alertmanager-secret + storage: + volumeClaimTemplate: + spec: + storageClassName: local-hostpath + resources: + requests: + storage: 1Gi + kubelet: + enabled: true + serviceMonitor: + metricRelabelings: + # Drop high cardinality labels + - action: labeldrop + regex: (uid) + - action: labeldrop + regex: (id|name) + - action: drop + sourceLabels: [ "__name__" ] + regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count) + kubeApiServer: + enabled: true + serviceMonitor: + metricRelabelings: + # Drop high cardinality labels + - action: drop + sourceLabels: [ "__name__" ] + regex: 
(apiserver|etcd|rest_client)_request(|_sli|_slo)_duration_seconds_bucket + - action: drop + sourceLabels: [ "__name__" ] + regex: (apiserver_response_sizes_bucket|apiserver_watch_events_sizes_bucket) + kubeControllerManager: + enabled: true + endpoints: &cp + - 192.168.20.51 + - 192.168.20.52 + - 192.168.20.53 + kubeEtcd: + enabled: true + endpoints: *cp + kubeScheduler: + enabled: true + endpoints: *cp + kubeProxy: + enabled: false + prometheus: + ingress: + enabled: true + ingressClassName: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: prometheus.svg + gethomepage.dev/name: Prometheus + gethomepage.dev/group: Observability + gethomepage.dev/description: Monitoring system + pathType: Prefix + hosts: [ "prometheus.${SECRET_DOMAIN}" ] + thanosService: + enabled: true + thanosServiceMonitor: + enabled: true + prometheusSpec: + podMetadata: + annotations: + secret.reloader.stakater.com/reload: &secret thanos-objstore-config + replicas: 2 + replicaExternalLabelName: __replica__ + scrapeInterval: 1m # Must match interval in Grafana Helm chart + ruleSelectorNilUsesHelmValues: false + serviceMonitorSelectorNilUsesHelmValues: false + podMonitorSelectorNilUsesHelmValues: false + probeSelectorNilUsesHelmValues: false + scrapeConfigSelectorNilUsesHelmValues: false + enableAdminAPI: true + walCompression: true + enableFeatures: # https://prometheus.io/docs/prometheus/latest/feature_flags/ + - auto-gomemlimit + - memory-snapshot-on-shutdown + - new-service-discovery-manager + thanos: + image: quay.io/thanos/thanos:${THANOS_VERSION} + version: "${THANOS_VERSION#v}" + objectStorageConfig: + existingSecret: + name: *secret + key: config + retention: 2d + retentionSize: 10GB + resources: + requests: + cpu: 100m + limits: + memory: 2500Mi + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: local-hostpath + resources: + requests: + storage: 15Gi + nodeExporter: + enabled: true + prometheus-node-exporter: + fullnameOverride: node-exporter + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: [ "__meta_kubernetes_pod_node_name" ] + targetLabel: kubernetes_node + kubeStateMetrics: + enabled: true + kube-state-metrics: + fullnameOverride: kube-state-metrics + metricLabelsAllowlist: + - pods=[*] + - deployments=[*] + - persistentvolumeclaims=[*] + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: [ "__meta_kubernetes_pod_node_name" ] + targetLabel: kubernetes_node + grafana: + enabled: false + forceDeployDashboards: true + sidecar: + dashboards: + annotations: + grafana_folder: Kubernetes diff --git a/kubernetes/apps/observability/kube-prometheus-stack/app/kustomization.yaml b/kubernetes/apps/observability/kube-prometheus-stack/app/kustomization.yaml new file mode 100644 index 00000000..9cffb524 --- /dev/null +++ b/kubernetes/apps/observability/kube-prometheus-stack/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml b/kubernetes/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml new file mode 100644 index 00000000..4d880fa2 --- /dev/null +++ 
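The Thanos sidecar block above is parameterised with `${THANOS_VERSION}`; the value comes from Flux post-build variable substitution in this app's Kustomization a little further down in the diff (`postBuild.substitute`), and `${THANOS_VERSION#v}` relies on the bash-style prefix strip that Flux's substitution supports. With `THANOS_VERSION: v0.35.0`, the rendered HelmRelease values would read roughly:

    thanos:
      image: quay.io/thanos/thanos:v0.35.0
      version: "0.35.0"   # "#v" strips the leading "v"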
b/kubernetes/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: miscellaneous-rules +spec: + groups: + - name: dockerhub + rules: + - alert: BootstrapRateLimitRisk + annotations: + summary: Kubernetes cluster at risk of being rate limited by dockerhub on bootstrap + expr: count(time() - container_last_seen{image=~"(docker.io).*",container!=""} < 30) > 100 + for: 15m + labels: + severity: critical + - name: oom + rules: + - alert: OOMKilled + annotations: + summary: Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes. + expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1 + labels: + severity: critical diff --git a/kubernetes/apps/observability/kube-prometheus-stack/ks.yaml b/kubernetes/apps/observability/kube-prometheus-stack/ks.yaml new file mode 100644 index 00000000..1dca6026 --- /dev/null +++ b/kubernetes/apps/observability/kube-prometheus-stack/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kube-prometheus-stack + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/observability/kube-prometheus-stack/app + dependsOn: + - name: external-secrets-stores + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=docker depName=quay.io/thanos/thanos + THANOS_VERSION: v0.35.0 diff --git a/kubernetes/apps/observability/kustomization.yaml b/kubernetes/apps/observability/kustomization.yaml new file mode 100644 index 00000000..3fb6fdbc --- /dev/null +++ b/kubernetes/apps/observability/kustomization.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./portainer/ks.yaml + - ./gatus/ks.yaml + - ./thanos/ks.yaml + - ./kube-prometheus-stack/ks.yaml + - ./grafana/ks.yaml + - ./vector/ks.yaml + - ./loki/ks.yaml + - ./unpoller/ks.yaml diff --git a/kubernetes/apps/observability/loki/app/externalsecret.yaml b/kubernetes/apps/observability/loki/app/externalsecret.yaml new file mode 100644 index 00000000..e13a2dbf --- /dev/null +++ b/kubernetes/apps/observability/loki/app/externalsecret.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: loki +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: loki-secret + template: + engineVersion: v2 + data: + S3_BUCKET: "{{ .MINIO_LOKI_BUCKET }}" + S3_ACCESS_KEY: "{{ .MINIO_LOKI_ACCESS_KEY }}" + S3_SECRET_KEY: "{{ .MINIO_LOKI_SECRET_KEY }}" + S3_REGION: us-east-1 + 
dataFrom: + - extract: + key: minio diff --git a/kubernetes/apps/observability/loki/app/helmrelease.yaml b/kubernetes/apps/observability/loki/app/helmrelease.yaml new file mode 100644 index 00000000..4f1d3af9 --- /dev/null +++ b/kubernetes/apps/observability/loki/app/helmrelease.yaml @@ -0,0 +1,148 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: loki +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: loki + version: 6.5.2 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + crds: Skip + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: Skip + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: local-path-provisioner + namespace: storage + - name: vector-agent + namespace: observability + - name: vector-aggregator + namespace: observability + valuesFrom: + - targetPath: loki.storage.bucketNames.chunks + kind: Secret + name: &lokiSecret loki-secret + valuesKey: S3_BUCKET + - targetPath: loki.storage.s3.region + kind: Secret + name: *lokiSecret + valuesKey: S3_REGION + - targetPath: loki.storage.s3.accessKeyId + kind: Secret + name: *lokiSecret + valuesKey: S3_ACCESS_KEY + - targetPath: loki.storage.s3.secretAccessKey + kind: Secret + name: *lokiSecret + valuesKey: S3_SECRET_KEY + values: + deploymentMode: SimpleScalable + loki: + podAnnotations: + configmap.reloader.stakater.com/reload: *lokiSecret + secret.reloader.stakater.com/reload: *lokiSecret + ingester: + chunk_encoding: snappy + storage: + type: s3 + s3: + endpoint: ${NAS_URL}:9000 + s3ForcePathStyle: true + insecure: true + schemaConfig: + configs: + - from: "2024-04-01" + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + structuredConfig: + auth_enabled: false + server: + log_level: info + http_listen_port: 3100 + grpc_listen_port: 9095 + grpc_server_max_recv_msg_size: 8388608 + grpc_server_max_send_msg_size: 8388608 + limits_config: + ingestion_burst_size_mb: 128 + ingestion_rate_mb: 64 + max_query_parallelism: 100 + per_stream_rate_limit: 64M + per_stream_rate_limit_burst: 128M + reject_old_samples: true + reject_old_samples_max_age: 168h + retention_period: 30d + shard_streams: + enabled: true + split_queries_by_interval: 1h + query_scheduler: + max_outstanding_requests_per_tenant: 4096 + frontend: + max_outstanding_per_tenant: 4096 + ruler: + enable_api: true + enable_alertmanager_v2: true + alertmanager_url: http://alertmanager-operated.observability.svc.cluster.local:9093 + storage: + type: local + local: + directory: /rules + rule_path: /rules/fake + analytics: + reporting_enabled: false + backend: + replicas: 3 + persistence: + size: 7Gi + storageClass: local-hostpath + gateway: + replicas: 3 + image: + registry: ghcr.io + deploymentStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 + ingress: + enabled: true + ingressClassName: internal + hosts: + - host: "loki.${SECRET_DOMAIN}" + paths: + - path: / + pathType: Prefix + read: + replicas: 3 + write: + replicas: 3 + persistence: + size: 7Gi + storageClass: local-hostpath + sidecar: + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + rules: + searchNamespace: ALL + folder: /rules/fake + lokiCanary: + enabled: false + test: + enabled: false diff --git a/kubernetes/apps/observability/loki/app/kustomization.yaml b/kubernetes/apps/observability/loki/app/kustomization.yaml 
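The Loki HelmRelease above pulls its object-storage settings out of the `loki-secret` produced by the ExternalSecret: each `valuesFrom` entry with a `targetPath` injects a single secret key at that exact path in the chart values before the release is rendered. Merged with the inline `values`, the storage section effectively becomes the following — the bucket and credential values are placeholders for whatever the 1Password `minio` item holds:

    loki:
      storage:
        type: s3
        bucketNames:
          chunks: <MINIO_LOKI_BUCKET>
        s3:
          endpoint: ${NAS_URL}:9000
          region: us-east-1
          accessKeyId: <MINIO_LOKI_ACCESS_KEY>
          secretAccessKey: <MINIO_LOKI_SECRET_KEY>
          s3ForcePathStyle: true
          insecure: true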
new file mode 100644 index 00000000..4eed917b --- /dev/null +++ b/kubernetes/apps/observability/loki/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/observability/loki/ks.yaml b/kubernetes/apps/observability/loki/ks.yaml new file mode 100644 index 00000000..e21695b9 --- /dev/null +++ b/kubernetes/apps/observability/loki/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app loki + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + - name: dragonfly-cluster + path: ./kubernetes/apps/observability/loki/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/observability/namespace.yaml b/kubernetes/apps/observability/namespace.yaml new file mode 100644 index 00000000..ce3a5bd2 --- /dev/null +++ b/kubernetes/apps/observability/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: observability + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/observability/portainer/app/helmrelease.yaml b/kubernetes/apps/observability/portainer/app/helmrelease.yaml new file mode 100644 index 00000000..08f6b97a --- /dev/null +++ b/kubernetes/apps/observability/portainer/app/helmrelease.yaml @@ -0,0 +1,50 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app portainer + namespace: networking +spec: + interval: 5m + dependsOn: + - name: longhorn + namespace: storage + - name: volsync + namespace: storage + chart: + spec: + chart: portainer + version: 1.0.51 + sourceRef: + kind: HelmRepository + name: portainer-charts + namespace: flux-system + interval: 5m + values: + image: + repository: portainer/portainer-ce + tag: 2.20.2 + service: + type: ClusterIP + httpPort: 9000 + persistence: + existingClaim: *app + ingress: + enabled: true + ingressClassName: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: portainer.png + gethomepage.dev/name: Portainer + gethomepage.dev/group: Observability + gethomepage.dev/description: Container management UI + gethomepage.dev/widget.type: portainer + gethomepage.dev/widget.url: http://portainer.observability.svc.cluster.local:9000 + gethomepage.dev/widget.env: "2" + gethomepage.dev/widget.key: "{{HOMEPAGE_VAR_PORTAINER_TOKEN}}" + hosts: + - host: "portainer.${SECRET_DOMAIN}" + paths: + - path: / + pathType: Prefix diff --git a/kubernetes/apps/observability/portainer/app/kustomization.yaml b/kubernetes/apps/observability/portainer/app/kustomization.yaml new file mode 100644 index 00000000..a928a563 --- /dev/null +++ b/kubernetes/apps/observability/portainer/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - 
../../../../templates/volsync diff --git a/kubernetes/apps/observability/portainer/ks.yaml b/kubernetes/apps/observability/portainer/ks.yaml new file mode 100644 index 00000000..90c5ec1c --- /dev/null +++ b/kubernetes/apps/observability/portainer/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app portainer + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/observability/portainer/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + APP: *app + VOLSYNC_CAPACITY: 1Gi diff --git a/kubernetes/apps/observability/thanos/app/externalsecret.yaml b/kubernetes/apps/observability/thanos/app/externalsecret.yaml new file mode 100644 index 00000000..21ce82c3 --- /dev/null +++ b/kubernetes/apps/observability/thanos/app/externalsecret.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: thanos +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: thanos-secret + template: + engineVersion: v2 + data: + S3_BUCKET: "{{ .MINIO_THANOS_BUCKET }}" + S3_ACCESS_KEY: "{{ .MINIO_THANOS_ACCESS_KEY }}" + S3_SECRET_KEY: "{{ .MINIO_THANOS_SECRET_KEY }}" + S3_REGION: us-east-1 + dataFrom: + - extract: + key: minio diff --git a/kubernetes/apps/observability/thanos/app/helmrelease.yaml b/kubernetes/apps/observability/thanos/app/helmrelease.yaml new file mode 100644 index 00000000..660f7690 --- /dev/null +++ b/kubernetes/apps/observability/thanos/app/helmrelease.yaml @@ -0,0 +1,124 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: thanos +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: thanos + version: 1.17.1 + sourceRef: + kind: HelmRepository + name: stevehipwell + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: local-path-provisioner + namespace: storage + valuesFrom: + - targetPath: objstoreConfig.value.config.bucket + kind: Secret + name: thanos-secret + valuesKey: S3_BUCKET + - targetPath: objstoreConfig.value.config.region + kind: Secret + name: thanos-secret + valuesKey: S3_REGION + - targetPath: objstoreConfig.value.config.access_key + kind: Secret + name: thanos-secret + valuesKey: S3_ACCESS_KEY + - targetPath: objstoreConfig.value.config.secret_key + kind: Secret + name: thanos-secret + valuesKey: S3_SECRET_KEY + values: + objstoreConfig: + value: + type: s3 + config: + insecure: true + endpoint: ${NAS_URL}:9000 + additionalEndpoints: + - dnssrv+_grpc._tcp.kube-prometheus-stack-thanos-discovery.observability.svc.cluster.local + additionalReplicaLabels: ["__replica__"] + serviceMonitor: + enabled: true + compact: + enabled: true + extraArgs: + - --compact.concurrency=4 + - --delete-delay=30m + - --retention.resolution-raw=14d + - --retention.resolution-5m=30d + - --retention.resolution-1h=60d + 
persistence: &persistence + enabled: true + storageClass: local-hostpath + size: 5Gi + query: + replicas: 3 + extraArgs: ["--alert.query-url=https://thanos.${SECRET_DOMAIN}"] + queryFrontend: + enabled: true + replicas: 3 + extraEnv: &extraEnv + - name: THANOS_CACHE_CONFIG + valueFrom: + configMapKeyRef: + name: &configMap thanos-cache-configmap + key: cache.yaml + extraArgs: ["--query-range.response-cache-config=$(THANOS_CACHE_CONFIG)"] + ingress: + enabled: true + ingressClassName: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: thanos.svg + gethomepage.dev/name: Thanos + gethomepage.dev/group: Observability + gethomepage.dev/description: Highly available Prometheus setup + hosts: + - thanos.${SECRET_DOMAIN} + podAnnotations: &podAnnotations + configmap.reloader.stakater.com/reload: *configMap + rule: + enabled: true + replicas: 3 + extraArgs: ["--web.prefix-header=X-Forwarded-Prefix"] + alertmanagersConfig: + value: |- + alertmanagers: + - api_version: v2 + static_configs: + - dnssrv+_http-web._tcp.alertmanager-operated.observability.svc.cluster.local + rules: + value: |- + groups: + - name: PrometheusWatcher + rules: + - alert: PrometheusDown + annotations: + summary: A Prometheus has disappeared from Prometheus target discovery + expr: absent(up{job="kube-prometheus-stack-prometheus"}) + for: 5m + labels: + severity: critical + persistence: *persistence + storeGateway: + replicas: 3 + extraEnv: *extraEnv + extraArgs: ["--index-cache.config=$(THANOS_CACHE_CONFIG)"] + persistence: *persistence + podAnnotations: *podAnnotations diff --git a/kubernetes/apps/observability/thanos/app/kustomization.yaml b/kubernetes/apps/observability/thanos/app/kustomization.yaml new file mode 100644 index 00000000..3690a04f --- /dev/null +++ b/kubernetes/apps/observability/thanos/app/kustomization.yaml @@ -0,0 +1,13 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: thanos-cache-configmap + files: + - cache.yaml=./resources/cache.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/observability/thanos/app/resources/cache.yaml b/kubernetes/apps/observability/thanos/app/resources/cache.yaml new file mode 100644 index 00000000..df31f345 --- /dev/null +++ b/kubernetes/apps/observability/thanos/app/resources/cache.yaml @@ -0,0 +1,5 @@ +--- +type: REDIS +config: + addr: dragonfly.database.svc.cluster.local:6379 + db: 1 diff --git a/kubernetes/apps/observability/thanos/ks.yaml b/kubernetes/apps/observability/thanos/ks.yaml new file mode 100644 index 00000000..c981bcf5 --- /dev/null +++ b/kubernetes/apps/observability/thanos/ks.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app thanos + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: dragonfly-cluster + - name: external-secrets-stores + path: ./kubernetes/apps/observability/thanos/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/observability/unpoller/app/externalsecret.yaml 
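The Thanos query-frontend and store-gateway above receive their cache settings through the `THANOS_CACHE_CONFIG` env var, which Kubernetes expands inside `extraArgs` via the `$(VAR)` syntax, so the flag receives the Redis config content inline rather than a file path. The `configMapGenerator` in the kustomization (with `disableNameSuffixHash: true`, so the name stays stable for the env reference and the reloader annotation) renders `resources/cache.yaml` into a ConfigMap roughly like this:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: thanos-cache-configmap   # stable name, no kustomize hash suffix
    data:
      cache.yaml: |
        type: REDIS
        config:
          addr: dragonfly.database.svc.cluster.local:6379
          db: 1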
b/kubernetes/apps/observability/unpoller/app/externalsecret.yaml new file mode 100644 index 00000000..9d5e5d0d --- /dev/null +++ b/kubernetes/apps/observability/unpoller/app/externalsecret.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: unpoller +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: unpoller-secret + template: + engineVersion: v2 + data: + UP_UNIFI_DEFAULT_USER: "{{ .username }}" + UP_UNIFI_DEFAULT_PASS: "{{ .password }}" + dataFrom: + - extract: + key: unifipoller diff --git a/kubernetes/apps/observability/unpoller/app/helmrelease.yaml b/kubernetes/apps/observability/unpoller/app/helmrelease.yaml new file mode 100644 index 00000000..995a7ce7 --- /dev/null +++ b/kubernetes/apps/observability/unpoller/app/helmrelease.yaml @@ -0,0 +1,79 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: unpoller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: kube-prometheus-stack + namespace: observability + values: + controllers: + unpoller: + containers: + app: + image: + repository: ghcr.io/unpoller/unpoller + tag: v2.11.2@sha256:73b39c0b3b8fa92aa82a7613d3486253ffbd8c057833b4621402a268159bf2a2 + env: + TZ: "${TIMEZONE}" + UP_UNIFI_DEFAULT_ROLE: home-ops + UP_UNIFI_DEFAULT_URL: https://192.168.20.1 + UP_UNIFI_DEFAULT_VERIFY_SSL: false + UP_INFLUXDB_DISABLE: true + envFrom: + - secretRef: + name: unpoller-secret + probes: + liveness: + enabled: true + readiness: + enabled: true + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 128Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: unpoller + ports: + http: + port: 9130 + serviceMonitor: + app: + serviceName: unpoller + endpoints: + - port: http + scheme: http + path: /metrics + interval: 2m # Unifi API only polls at 2m intervals + scrapeTimeout: 10s diff --git a/kubernetes/apps/observability/unpoller/app/kustomization.yaml b/kubernetes/apps/observability/unpoller/app/kustomization.yaml new file mode 100644 index 00000000..4eed917b --- /dev/null +++ b/kubernetes/apps/observability/unpoller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/observability/unpoller/ks.yaml b/kubernetes/apps/observability/unpoller/ks.yaml new file mode 100644 index 00000000..498091f7 --- /dev/null +++ b/kubernetes/apps/observability/unpoller/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + 
name: &app unpoller + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/apps/observability/unpoller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/observability/vector/app/agent/helmrelease.yaml b/kubernetes/apps/observability/vector/app/agent/helmrelease.yaml new file mode 100644 index 00000000..129b5ff9 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/agent/helmrelease.yaml @@ -0,0 +1,103 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: vector-agent +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: vector-aggregator + namespace: observability + values: + controllers: + vector-agent: + type: daemonset + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/timberio/vector + tag: 0.38.0-alpine@sha256:0f40a5bc6df18de0f4855ee6a801449f1b78fa60b3c5001c530896abc64b18b2 + env: + PROCFS_ROOT: /host/proc + SYSFS_ROOT: /host/sys + VECTOR_SELF_NODE_NAME: + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + VECTOR_SELF_POD_NAME: + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + VECTOR_SELF_POD_NAMESPACE: + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + args: ["--config", "/etc/vector/vector.yaml"] + securityContext: + privileged: true + serviceAccount: + create: true + name: vector-agent + persistence: + config: + enabled: true + type: configMap + name: vector-agent-configmap + globalMounts: + - path: /etc/vector/vector.yaml + subPath: vector.yaml + readOnly: true + data: + type: emptyDir + globalMounts: + - path: /vector-data-dir + procfs: + type: hostPath + hostPath: /proc + hostPathType: Directory + globalMounts: + - path: /host/proc + readOnly: true + sysfs: + type: hostPath + hostPath: /sys + hostPathType: Directory + globalMounts: + - path: /host/sys + readOnly: true + var-lib: + type: hostPath + hostPath: /var/lib + hostPathType: Directory + globalMounts: + - readOnly: true + var-log: + type: hostPath + hostPath: /var/log + hostPathType: Directory + globalMounts: + - readOnly: true diff --git a/kubernetes/apps/observability/vector/app/agent/kustomization.yaml b/kubernetes/apps/observability/vector/app/agent/kustomization.yaml new file mode 100644 index 00000000..cad3d529 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/agent/kustomization.yaml @@ -0,0 +1,13 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./rbac.yaml +configMapGenerator: + - name: vector-agent-configmap + files: + - vector.yaml=./resources/vector.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/observability/vector/app/agent/rbac.yaml b/kubernetes/apps/observability/vector/app/agent/rbac.yaml new file mode 100644 index 
00000000..a088f8d1 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/agent/rbac.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: vector-agent +rules: + - apiGroups: [""] + resources: ["namespaces", "nodes", "pods"] + verbs: ["list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: vector-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vector-agent +subjects: + - kind: ServiceAccount + name: vector-agent + namespace: observability diff --git a/kubernetes/apps/observability/vector/app/agent/resources/vector.yaml b/kubernetes/apps/observability/vector/app/agent/resources/vector.yaml new file mode 100644 index 00000000..f3a7565c --- /dev/null +++ b/kubernetes/apps/observability/vector/app/agent/resources/vector.yaml @@ -0,0 +1,25 @@ +--- +data_dir: /vector-data-dir + +sources: + kubernetes_source: + type: kubernetes_logs + use_apiserver_cache: true + pod_annotation_fields: + container_image: container_image + container_name: container_name + pod_labels: pod_labels + pod_name: pod_name + pod_annotations: "" + namespace_annotation_fields: + namespace_labels: "" + node_annotation_fields: + node_labels: "" + +sinks: + kubernetes: + type: vector + compression: true + version: "2" + address: vector-aggregator.observability.svc.cluster.local:6010 + inputs: ["kubernetes_source"] diff --git a/kubernetes/apps/observability/vector/app/aggregator/helmrelease.yaml b/kubernetes/apps/observability/vector/app/aggregator/helmrelease.yaml new file mode 100644 index 00000000..30dd1955 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/aggregator/helmrelease.yaml @@ -0,0 +1,78 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app vector-aggregator +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: app-template + version: 3.1.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + vector-aggregator: + replicas: 3 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/timberio/vector + tag: 0.38.0-alpine@sha256:0f40a5bc6df18de0f4855ee6a801449f1b78fa60b3c5001c530896abc64b18b2 + args: ["--config", "/etc/vector/vector.yaml"] + probes: + liveness: + enabled: true + readiness: + enabled: true + defaultPodOptions: + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + service: + app: + controller: vector-aggregator + type: LoadBalancer + annotations: + external-dns.alpha.kubernetes.io/hostname: vector.devbu.io + io.cilium/lb-ipam-ips: 192.168.20.66 + ports: + http: + primary: true + port: 8686 + journald: + port: 6000 + kubernetes: + port: 6010 + persistence: + config: + type: configMap + name: vector-aggregator-configmap + globalMounts: + - path: /etc/vector/vector.yaml + subPath: vector.yaml + readOnly: true + data: + type: emptyDir + globalMounts: + - path: /vector-data-dir diff --git a/kubernetes/apps/observability/vector/app/aggregator/kustomization.yaml b/kubernetes/apps/observability/vector/app/aggregator/kustomization.yaml new 
file mode 100644 index 00000000..a1322387 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/aggregator/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: vector-aggregator-configmap + files: + - vector.yaml=./resources/vector.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/observability/vector/app/aggregator/resources/vector.yaml b/kubernetes/apps/observability/vector/app/aggregator/resources/vector.yaml new file mode 100644 index 00000000..7d0c4b41 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/aggregator/resources/vector.yaml @@ -0,0 +1,63 @@ +--- +data_dir: /vector-data-dir +api: + enabled: true + address: 0.0.0.0:8686 + +# +# Sources +# + +sources: + journald_source: + type: vector + version: "2" + address: 0.0.0.0:6000 + + kubernetes_source: + type: vector + version: "2" + address: 0.0.0.0:6010 + +# +# Transforms +# + +transforms: + kubernetes_remap: + type: remap + inputs: ["kubernetes_source"] + source: | + # Standardize 'app' index + .custom_app_name = .pod_labels."app.kubernetes.io/name" || .pod_labels.app || .pod_labels."k8s-app" || "unknown" + # Drop pod_labels + del(.pod_labels) + +# +# Sinks +# + +sinks: + journald: + inputs: ["journald_source"] + type: loki + endpoint: http://loki-gateway.observability.svc.cluster.local + encoding: { codec: json } + out_of_order_action: accept + remove_label_fields: true + remove_timestamp: true + labels: + hostname: '{{ host }}' + + kubernetes: + inputs: ["kubernetes_remap"] + type: loki + endpoint: http://loki-gateway.observability.svc.cluster.local + encoding: { codec: json } + out_of_order_action: accept + remove_label_fields: true + remove_timestamp: true + labels: + app: '{{ custom_app_name }}' + namespace: '{{ kubernetes.pod_namespace }}' + node: '{{ kubernetes.pod_node_name }}' diff --git a/kubernetes/apps/observability/vector/app/kustomization.yaml b/kubernetes/apps/observability/vector/app/kustomization.yaml new file mode 100644 index 00000000..54568aa0 --- /dev/null +++ b/kubernetes/apps/observability/vector/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./agent + - ./aggregator diff --git a/kubernetes/apps/observability/vector/ks.yaml b/kubernetes/apps/observability/vector/ks.yaml new file mode 100644 index 00000000..fef82ed7 --- /dev/null +++ b/kubernetes/apps/observability/vector/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app vector + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/observability/vector/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/storage/alert.yaml b/kubernetes/apps/storage/alert.yaml new file mode 100644 index 00000000..e16426b6 --- /dev/null +++ b/kubernetes/apps/storage/alert.yaml @@ -0,0 +1,29 @@ +# yaml-language-server: 
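With the aggregator config above, every Kubernetes log line arrives in Loki labelled with `app` (from the remap that prefers `app.kubernetes.io/name`, then `app`, then `k8s-app`), plus `namespace` and `node`, while journald events are labelled by `hostname`. A couple of illustrative LogQL queries against those labels — the selector values are examples, not something defined in this repo:

    {app="grafana", namespace="observability"} |= "error"
    {hostname="k8s-control-1"} | json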
$schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: storage +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: storage +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/apps/storage/kustomization.yaml b/kubernetes/apps/storage/kustomization.yaml new file mode 100644 index 00000000..f057d62d --- /dev/null +++ b/kubernetes/apps/storage/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./alert.yaml + - ./longhorn/ks.yaml + - ./volsync/ks.yaml + - ./local-path-provisioner/ks.yaml diff --git a/kubernetes/apps/storage/local-path-provisioner/app/helmrelease.yaml b/kubernetes/apps/storage/local-path-provisioner/app/helmrelease.yaml new file mode 100644 index 00000000..b4ef585a --- /dev/null +++ b/kubernetes/apps/storage/local-path-provisioner/app/helmrelease.yaml @@ -0,0 +1,84 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: local-path-provisioner +spec: + interval: 30m + chart: + spec: + chart: democratic-csi + version: 0.14.6 + sourceRef: + name: democratic-csi + kind: HelmRepository + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + + values: + fullnameOverride: local-path-provisioner + controller: + strategy: node + externalProvisioner: + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.1 + extraArgs: + - --leader-election=false + - --node-deployment=true + - --node-deployment-immediate-binding=false + - --feature-gates=Topology=true + - --strict-topology=true + - --enable-capacity=true + - --capacity-ownerref-level=1 + externalResizer: + enabled: false + externalAttacher: + enabled: false + externalSnapshotter: + enabled: false + csiDriver: + name: local-hostpath.cluster.local + storageCapacity: true + attachRequired: false + fsGroupPolicy: File + storageClasses: + - name: local-hostpath + defaultClass: false + reclaimPolicy: Delete + volumeBindingMode: WaitForFirstConsumer + allowVolumeExpansion: true + driver: + config: + driver: local-hostpath + local-hostpath: + shareBasePath: &storagePath /var/democratic-csi/local + controllerBasePath: *storagePath + dirPermissionsMode: "0770" + dirPermissionsUser: 0 + dirPermissionsGroup: 0 + node: + hostPID: true + driver: + image: ghcr.io/democratic-csi/democratic-csi:v1.9.1 + extraVolumeMounts: + - name: local-hostpath + mountPath: *storagePath + mountPropagation: Bidirectional + extraEnv: + - name: ISCSIADM_HOST_STRATEGY + value: nsenter + - name: ISCSIADM_HOST_PATH + value: /usr/local/sbin/iscsiadm + iscsiDirHostPath: /usr/local/etc/iscsi + 
iscsiDirHostPathType: "" + extraVolumes: + - name: local-hostpath + hostPath: + path: *storagePath + type: DirectoryOrCreate diff --git a/kubernetes/apps/storage/local-path-provisioner/app/kustomization.yaml b/kubernetes/apps/storage/local-path-provisioner/app/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/storage/local-path-provisioner/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/storage/local-path-provisioner/ks.yaml b/kubernetes/apps/storage/local-path-provisioner/ks.yaml new file mode 100644 index 00000000..31625a89 --- /dev/null +++ b/kubernetes/apps/storage/local-path-provisioner/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app local-path-provisioner + namespace: flux-system +spec: + targetNamespace: storage + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/storage/local-path-provisioner/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/storage/longhorn/app/helmrelease.yaml b/kubernetes/apps/storage/longhorn/app/helmrelease.yaml new file mode 100644 index 00000000..9b44c4ba --- /dev/null +++ b/kubernetes/apps/storage/longhorn/app/helmrelease.yaml @@ -0,0 +1,50 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: longhorn +spec: + interval: 30m + chart: + spec: + chart: longhorn + version: 1.6.1 + sourceRef: + kind: HelmRepository + name: longhorn + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + monitoring: + enabled: true + createPrometheusRules: true + defaultSettings: + defaultReplicaCount: 3 + backupstorePollInterval: 0 + createDefaultDiskLabeledNodes: true + restoreVolumeRecurringJobs: true + storageOverProvisioningPercentage: 100 + storageMinimalAvailablePercentage: 1 + guaranteedEngineManagerCPU: 20 + guaranteedReplicaManagerCPU: 20 + orphanAutoDeletion: true + concurrentAutomaticEngineUpgradePerNodeLimit: 3 + defaultLonghornStaticStorageClass: longhorn + nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod + ingress: + enabled: true + ingressClassName: internal + annotations: + gethomepage.dev/enabled: "true" + gethomepage.dev/icon: longhorn.png + gethomepage.dev/name: Longhorn + gethomepage.dev/group: Storage + host: longhorn.${SECRET_DOMAIN} + tls: true diff --git a/kubernetes/apps/storage/longhorn/app/kustomization.yaml b/kubernetes/apps/storage/longhorn/app/kustomization.yaml new file mode 100644 index 00000000..4de74b6b --- /dev/null +++ b/kubernetes/apps/storage/longhorn/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./snapshot.yaml diff --git a/kubernetes/apps/storage/longhorn/app/snapshot.yaml b/kubernetes/apps/storage/longhorn/app/snapshot.yaml new file 
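Both storage classes defined in this section are consumed in the usual way. For example, a workload wanting node-local scratch space on the democratic-csi `local-hostpath` class would request it with a PVC like the sketch below (claim name, namespace and size are illustrative only); because the class uses `volumeBindingMode: WaitForFirstConsumer`, the volume is only provisioned once a pod that mounts it is scheduled onto a node:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: scratch                  # hypothetical claim name
      namespace: observability
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: local-hostpath
      resources:
        requests:
          storage: 5Gi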
mode 100644 index 00000000..df7c34c7 --- /dev/null +++ b/kubernetes/apps/storage/longhorn/app/snapshot.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +reclaimPolicy: Delete +provisioner: driver.longhorn.io +parameters: + dataLocality: disabled + numberOfReplicas: "1" # Faster restores from the snapshotclass + replicaAutoBalance: best-effort + staleReplicaTimeout: "30" +allowVolumeExpansion: true +volumeBindingMode: Immediate +metadata: + name: longhorn-snapshot diff --git a/kubernetes/apps/storage/longhorn/ks.yaml b/kubernetes/apps/storage/longhorn/ks.yaml new file mode 100644 index 00000000..9e02ae8f --- /dev/null +++ b/kubernetes/apps/storage/longhorn/ks.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app longhorn + namespace: flux-system +spec: + targetNamespace: storage + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/storage/longhorn/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/storage/namespace.yaml b/kubernetes/apps/storage/namespace.yaml new file mode 100644 index 00000000..9ce8ef61 --- /dev/null +++ b/kubernetes/apps/storage/namespace.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: storage + labels: + kustomize.toolkit.fluxcd.io/prune: disabled + pod-security.kubernetes.io/enforce: privileged diff --git a/kubernetes/apps/storage/volsync/app/helmrelease.yaml b/kubernetes/apps/storage/volsync/app/helmrelease.yaml new file mode 100644 index 00000000..6f04de4e --- /dev/null +++ b/kubernetes/apps/storage/volsync/app/helmrelease.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: volsync +spec: + interval: 30m + chart: + spec: + chart: volsync + version: 0.9.1 + sourceRef: + kind: HelmRepository + name: backube + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + dependsOn: + - name: snapshot-controller + namespace: storage + values: + manageCRDs: true + image: + # https://github.com/backube/volsync/issues/828 + repository: &image ghcr.io/onedr0p/volsync + tag: &tag 0.9.1 + rclone: + repository: *image + tag: *tag + restic: + repository: *image + tag: *tag + metrics: + disableAuth: true diff --git a/kubernetes/apps/storage/volsync/app/kustomization.yaml b/kubernetes/apps/storage/volsync/app/kustomization.yaml new file mode 100644 index 00000000..5e098843 --- /dev/null +++ b/kubernetes/apps/storage/volsync/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/apps/storage/volsync/app/prometheusrule.yaml b/kubernetes/apps/storage/volsync/app/prometheusrule.yaml new file mode 100644 index 00000000..880d6738 --- /dev/null +++ b/kubernetes/apps/storage/volsync/app/prometheusrule.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: 
monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: volsync +spec: + groups: + - name: volsync.rules + rules: + - alert: VolSyncComponentAbsent + annotations: + summary: VolSync component has disappeared from Prometheus target discovery. + expr: | + absent(up{job="volsync-metrics"}) + for: 15m + labels: + severity: critical + - alert: VolSyncVolumeOutOfSync + annotations: + summary: >- + {{ $labels.obj_namespace }}/{{ $labels.obj_name }} volume + is out of sync. + expr: | + volsync_volume_out_of_sync == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/apps/storage/volsync/ks.yaml b/kubernetes/apps/storage/volsync/ks.yaml new file mode 100644 index 00000000..f909bb5f --- /dev/null +++ b/kubernetes/apps/storage/volsync/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app snapshot-controller + namespace: flux-system +spec: + targetNamespace: storage + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/storage/volsync/snapshot-controller + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app volsync + namespace: flux-system +spec: + targetNamespace: storage + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/storage/volsync/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/storage/volsync/snapshot-controller/helmrelease.yaml b/kubernetes/apps/storage/volsync/snapshot-controller/helmrelease.yaml new file mode 100644 index 00000000..b4e6822c --- /dev/null +++ b/kubernetes/apps/storage/volsync/snapshot-controller/helmrelease.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: snapshot-controller +spec: + interval: 30m + chart: + spec: + chart: snapshot-controller + version: 2.2.2 + sourceRef: + kind: HelmRepository + name: piraeus + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + retries: 3 + dependsOn: + - name: longhorn + namespace: storage + values: + controller: + volumeSnapshotClasses: + - name: longhorn-snapclass + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" + driver: driver.longhorn.io + deletionPolicy: Delete + # Ref: https://github.com/longhorn/longhorn/issues/2534#issuecomment-1010508714 + parameters: + type: snap + serviceMonitor: + create: true + webhook: + enabled: false diff --git a/kubernetes/apps/storage/volsync/snapshot-controller/kustomization.yaml b/kubernetes/apps/storage/volsync/snapshot-controller/kustomization.yaml new file mode 100644 index 00000000..17cbc72b --- /dev/null +++ b/kubernetes/apps/storage/volsync/snapshot-controller/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 
+kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/bootstrap/flux/kustomization.yaml b/kubernetes/bootstrap/flux/kustomization.yaml new file mode 100644 index 00000000..1d9ad47f --- /dev/null +++ b/kubernetes/bootstrap/flux/kustomization.yaml @@ -0,0 +1,62 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components and CRDs into your cluster. +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.3.0 +patches: + # Remove the default network policies + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/kubernetes/bootstrap/helmfile.yaml b/kubernetes/bootstrap/helmfile.yaml new file mode 100644 index 00000000..c4c3983e --- /dev/null +++ b/kubernetes/bootstrap/helmfile.yaml @@ -0,0 +1,44 @@ +--- +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + recreatePods: true + force: true + +repositories: + - name: cilium + url: https://helm.cilium.io + - name: coredns + url: https://coredns.github.io/helm + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +releases: + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.5 + values: + - ../apps/kube-system/cilium/app/helm-values.yaml + - name: coredns + namespace: kube-system + chart: coredns/coredns + version: 1.29.0 + values: + - ../apps/kube-system/coredns/app/helm-values.yaml + needs: ["cilium"] + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.2.1 + values: + - ../apps/kube-system/kubelet-csr-approver/app/helm-values.yaml + needs: ["cilium", "coredns"] + - name: spegel + namespace: kube-system + chart: oci://ghcr.io/spegel-org/helm-charts/spegel + version: v0.0.22 + values: + - ../apps/kube-system/spegel/app/helm-values.yaml + needs: ["cilium", "coredns", "kubelet-csr-approver"] diff --git a/kubernetes/bootstrap/talos/clusterconfig/.gitignore b/kubernetes/bootstrap/talos/clusterconfig/.gitignore new file mode 100644 index 00000000..9fd0998c --- /dev/null +++ b/kubernetes/bootstrap/talos/clusterconfig/.gitignore @@ -0,0 +1,4 @@ +home-kubernetes-k8s-control-1.yaml +home-kubernetes-k8s-control-2.yaml +home-kubernetes-k8s-control-3.yaml +talosconfig diff --git a/kubernetes/bootstrap/talos/talconfig.yaml 
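The bootstrap directory above is applied outside of Flux. The actual automation lives in the repo's task runner and is not part of this diff, but assuming the conventional order for this kind of template — render Talos configs, bring the nodes up, install the core charts, then install Flux — a rough manual sketch looks like the following (flags are the standard ones for each tool; node IP and file names match the talconfig and .gitignore shown here):

    # From kubernetes/bootstrap/talos: render per-node machine configs into clusterconfig/
    talhelper genconfig

    # Apply a rendered config to its node, e.g. the first control-plane node, then bootstrap etcd
    talosctl apply-config --insecure --nodes 192.168.20.51 \
      --file clusterconfig/home-kubernetes-k8s-control-1.yaml
    talosctl bootstrap --nodes 192.168.20.51

    # From the repo root: install Cilium, CoreDNS, kubelet-csr-approver and Spegel, then Flux
    helmfile --file kubernetes/bootstrap/helmfile.yaml apply
    kubectl apply --server-side --kustomize kubernetes/bootstrap/flux

After Flux is installed, the cluster still needs the GitRepository, SOPS age secret and entry Kustomization before the apps in this diff reconcile; those pieces are outside this section.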
b/kubernetes/bootstrap/talos/talconfig.yaml new file mode 100644 index 00000000..eaffe44d --- /dev/null +++ b/kubernetes/bootstrap/talos/talconfig.yaml @@ -0,0 +1,202 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json +--- +# renovate: datasource=docker depName=ghcr.io/siderolabs/installer +talosVersion: v1.7.2 +# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet +kubernetesVersion: v1.30.1 + +clusterName: "home-kubernetes" +endpoint: https://192.168.20.60:6443 +clusterPodNets: + - "10.69.0.0/16" +clusterSvcNets: + - "10.96.0.0/16" +additionalApiServerCertSans: &sans + - "192.168.20.60" + - 127.0.0.1 # KubePrism +additionalMachineCertSans: *sans + +# Disable built-in Flannel to use Cilium +cniConfig: + name: none + +nodes: + - hostname: "k8s-control-1" + ipAddress: "192.168.20.51" + installDisk: /dev/sda + talosImageURL: factory.talos.dev/installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b + controlPlane: true + nodeLabels: + "node.longhorn.io/create-default-disk": "true" + networkInterfaces: + - deviceSelector: + hardwareAddr: "bc:24:11:b5:dd:1f" + dhcp: false + addresses: + - "192.168.20.51/24" + routes: + - network: 0.0.0.0/0 + gateway: "192.168.20.1" + mtu: 1500 + vip: + ip: "192.168.20.60" + - hostname: "k8s-control-2" + ipAddress: "192.168.20.52" + installDisk: /dev/sda + talosImageURL: factory.talos.dev/installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b + controlPlane: true + nodeLabels: + "node.longhorn.io/create-default-disk": "true" + networkInterfaces: + - deviceSelector: + hardwareAddr: "bc:24:11:0c:fd:22" + dhcp: false + addresses: + - "192.168.20.52/24" + routes: + - network: 0.0.0.0/0 + gateway: "192.168.20.1" + mtu: 1500 + vip: + ip: "192.168.20.60" + - hostname: "k8s-control-3" + ipAddress: "192.168.20.53" + installDisk: /dev/sda + talosImageURL: factory.talos.dev/installer/88d1f7a5c4f1d3aba7df787c448c1d3d008ed29cfb34af53fa0df4336a56040b + controlPlane: true + nodeLabels: + "node.longhorn.io/create-default-disk": "true" + networkInterfaces: + - deviceSelector: + hardwareAddr: "bc:24:11:a8:19:33" + dhcp: false + addresses: + - "192.168.20.53/24" + routes: + - network: 0.0.0.0/0 + gateway: "192.168.20.1" + mtu: 1500 + vip: + ip: "192.168.20.60" + +patches: + # Configure containerd + - |- + machine: + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: |- + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + + # Disable search domain everywhere + - |- + machine: + network: + disableSearchDomain: true + + # Enable cluster discovery + - |- + cluster: + discovery: + registries: + kubernetes: + disabled: false + service: + disabled: false + + # Configure kubelet + - |- + machine: + kubelet: + extraArgs: + rotate-server-certificates: true + nodeIP: + validSubnets: + - 192.168.20.0/24 + + # Force nameserver + - |- + machine: + network: + nameservers: + - "1.1.1.1" + - "1.0.0.1" + + # Custom sysctl settings + - |- + machine: + sysctls: + fs.inotify.max_queued_events: "65536" + fs.inotify.max_user_watches: "524288" + fs.inotify.max_user_instances: "8192" + net.core.rmem_max: "2500000" + net.core.wmem_max: "2500000" + + # Mount longhorn in kubelet + - |- + machine: + kubelet: + 
extraMounts: + - destination: /var/lib/longhorn + type: bind + source: /var/lib/longhorn + options: + - bind + - rshared + - rw + - destination: /var/democratic-csi/local + type: bind + source: /var/democratic-csi/local + options: + - bind + - rshared + - rw + +controlPlane: + patches: + # Cluster configuration + - |- + cluster: + allowSchedulingOnControlPlanes: true + controllerManager: + extraArgs: + bind-address: 0.0.0.0 + coreDNS: + disabled: true + proxy: + disabled: true + scheduler: + extraArgs: + bind-address: 0.0.0.0 + + # ETCD configuration + - |- + cluster: + etcd: + extraArgs: + listen-metrics-urls: http://0.0.0.0:2381 + advertisedSubnets: + - 192.168.20.0/24 + + # Disable default API server admission plugins. + - |- + - op: remove + path: /cluster/apiServer/admissionControl + + # Enable K8s Talos API Access + - |- + machine: + features: + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade diff --git a/kubernetes/bootstrap/talos/talsecret.sops.yaml b/kubernetes/bootstrap/talos/talsecret.sops.yaml new file mode 100644 index 00000000..40854562 --- /dev/null +++ b/kubernetes/bootstrap/talos/talsecret.sops.yaml @@ -0,0 +1,43 @@ +cluster: + id: ENC[AES256_GCM,data:QbjE9IPiR9tJPQhj1C7blaGnBhxfrhSxRbnQ0EaavO3nMZ5ZmLSAhRNmsO4=,iv:rSDdOW02IsMDn7d1Wp4nj3IuXcXnEgAhTDXcLGqCTes=,tag:rvtrjbXkrg+j0v2xkXyaZg==,type:str] + secret: ENC[AES256_GCM,data:WzkHpllRwM2bDg+luHoJyY9M1sJK1wv2DaOzNAW+CBG8sXOciMsr2D8gbZE=,iv:TEWExl8k1KxJNm49bHQ7WUkYV8VyW8breQiFjxxJ8Cc=,tag:NGgoTWVXxkb1PjPge3Nw4A==,type:str] +secrets: + bootstraptoken: ENC[AES256_GCM,data:9Cv7bdGjaYaPge6bkp8BWi6vc8nax+I=,iv:GVdwUM/3nSAQ6NjTVJYkgEmUQ0CvPrkth+Rbrx1Nv9o=,tag:gHYWBgxe+rlf0+x4gxq5iA==,type:str] + secretboxencryptionsecret: ENC[AES256_GCM,data:vTLkg1GwsF1+lpfNk/is5UTsKj9d+ZtHwCs1c0SMaazT/kA8iCIKiv5Lk+k=,iv:WVFQWe82dPcfytPJqkaPb3BglbS+65GFAgVhiqoMyq0=,tag:4CZWulQHowfRPw8cfE/MVg==,type:str] +trustdinfo: + token: ENC[AES256_GCM,data:tL3MCYF8IAlpJCkOp99uPFIaIAedTvs=,iv:pTHRGFxMj+dSYF4UPWTGWzEQrfHR8MLXqZr+5rlFq+I=,tag:6lvc0E1+uegGM6d6DQpkVg==,type:str] +certs: + etcd: + crt: ENC[AES256_GCM,data:C/RmLESEFlw0dJExGIG3NxthZlr50zsej6srQpcrKkNg0v4AAfwCtyAvpX67w2a8/ivsErX1dFifaMDW5yJse1tbfUKhEZ5ZY0ZmmJkALDJMmS0SfVyIw3Hg8lEJer7JTCY7MYC3cFDPez5Yml2Y73ETiak2RB81OvFcItQseNsmo+2Ut5zgDP7MwW5WaDoaZVQcwMJHYp/OG/JD6UE8VwmLT0H8eGXowzNlc9OkD/UHlQbimRBCUykZE5/xOC07A3lqByZ2zNoDnD9sBbgRJ1cC5gbL8GNuPTm6Yu7vX4snCmc3vq954kYz46nHApj7v0+FpNg0f8RckLYRC8nRqWldnduwatCzua9AXdWmkftUCQgkNoUYF/MnGoINo6exJ5GaW5RNMj/qc9fTzHI7NArRhzAbe5y0kQo0L5/TtNSvTj+nBZ+beGwcTvQuzNKcRkRf3puOaPWwsjYAByiUdiqBEY8qWU3O4ojZkw7/ISd2qh4mgkySK7UXyCg1GFHCi0ywoILMbEqnk9sjCZuIilVUOEyghohJkjhGyviFcIwLvdaxvW8vv4rtIKc9e2FAHQM0RjY7KpXGxSf0vicrFfBR7ZLom0UK6Qa8p+3hR9TKNuySxgiclOM29qAiL6tDkQSEvsyCZpmIkkIWkTB4DjG2jAk3CvnnnWpr0QrdW6ES17Uq7q8X4iyCsZoUVCvPnS7XcEvnOxjMqRA4Ew+AyQgMFwLAko0IADK6TB9XPymDraveDFiRTGG4DhyZ9eVWJnjZu9k8PvHMHodaVq3UMWbYtz97LmIg1uAfl9C/L4sqf5F4WpuwwTZiJRatPt0vx6bKKqXkSN4ZjGr0FZBwSBd1NJD0ywvT68XaHpK9hp0D90oCWkIpX5zvZGRFtL9zcsMx1fnuS09rfNXuQcVGO1mqBwNLotqfuL7mEz6+POkAfgQbZuJGrkMv02jMWKdsg4KL9NCuLAQG6Axn8FklBJ22l2UFDTmyxVKKmAfPpAhC+fmERQNVivRYQNShYLqdICtxeA==,iv:oVeXHANV06t1BsVYO6mM168HH5/yyKY52qW20UVeRQU=,tag:QobVf5D46rKfC0FiZHhGBA==,type:str] + key: 
ENC[AES256_GCM,data:fVZwnBW6e1+Wf2k7qM9wjfh7vW/70UqTOG3wsfRvQ9OyvjeDgP6eJftK1kQ8d9tsvyE34+zX/XnrLH3fhco/36594zvq+P+3wDtDBOBj+DtjF5/+brO/u2LnWtNc5WoyRzw4FatzKLQIdN+Fi9jsUKdiAyBPot8cXMi6av4kyXl1ZJRzWLWTQ2tLnhCRHWOTte+tccD+7dpwikP1mqM4nOH4bYwUKXWn95Py2IYBFQaMVo5CzW5Mb7GdumLItLz1aIivQxDXSD4uJGXIdITtH5x3XnIawU5B6dvlP37qdtcfEfUM4DNYdm8f0oFPOEqo0/dkk8YwYko+AeJXKcajMD17kWeRYVuWo0FcT//6+ZZMIhlGa01OXUxtryxym6e7K1/TmZFdrLm1evqTooDZOA==,iv:kRkpwMPFviKYzN+6LB6RNQVmLNaUnUpxLYcz2kOdwUk=,tag:ANQ6nkyd1wkE3A32GCBXfQ==,type:str] + k8s: + crt: ENC[AES256_GCM,data:VNIWvJZD3yDPt0NN7sqOFKinYICxH2EY26SoiRcVaRlatltv4FebwpgXe+bl1yCznrGM/S3fvmqPI8zpRXaafRX2R38kDaaNyJwGkrsftNA7QpRl9FBxqDswoL7qOFzuSA4MQ/8iQlpdMsdB3AcniFKyrYDeQawmAyBWT/ailQ5V1DkoB1cbld4e5u+9wpqVQUKHwmFT0Cv0zOk7p4sTsw6wmDnu6ofUfs7sWMPT7MdCVepCgXHlPMirPDDl40KR9XAWL9YwBvhFx+hZWgfK9THOG6JNYbQ9MGT2QwiFABXyht/ldptxYlDlU/sk3pkbkNcz89hn+na3WNbBbXp7RtUG138HEq7bPCtd58kmFIpU0w7Cbz3kvWeLnZCc0DaCzkMH8Gia+0LkK5j52465MMXYc5tNDDX3vjwGrVB4aYx4HHIaleseKwjAgQeqxExZBdnFjS8J7lshOFYFHwROtZJPlDfRaGA/iBd7CnOjXjNHtGYzD7Xyy7Ek1fHF5Q1vWw0OudIYWrfHI/3G+yJeK+pAI+IA7/irLTp94jJ/aGm7Ty8oEi+uue8+cTFQrUC0Q9ASg00YYNX+yeZjCPtYcIkn50OKHGXsIx6zvfHNk97xCZcNXoPEtQ1fph5vlIOgCe2QN56jZcuG9QNWJgQ1Y2f0AmEl1M1rlwb3AbcI6UwOP57+oE9JwAzN+mDg/L8i72jLwbwwKe6lMKWCB4K+afNMzvsICIdzG4m0VWLM5irHdirK1y6CEr5UnmJ+MtNTDjqbyTq59zqydxjDpb4L4ICTOtkV+YJKh3xjetcAUjToyscHXPriXJAfgRkf4q1k9XNQ9HT/MtLHPT/zVDDfAoDhj+FlV8uIJRFpgMLmKu0KnRXSKLMW5wDTlAeRHrhMpc0W5hRw17+jVca8vFOq20k5mSDG7T7Bm9b0qlBSYb4NLMUcD+FBpoLBskI+tFnQR30Gc3qMZCYJFCHVQUUmVGEemNIB3KdF/0S9ARKJOT6C8aNWtZgMrrSUXPRdB46C/p30Cus+gwzqNQidy7gvNlgGmQo=,iv:Z0q8KsRr3d631InGWONKHyV4Wr2iCnK+8KDeX9Mm5TQ=,tag:OpYwMOKv5tsfUKk/fJEFYw==,type:str] + key: ENC[AES256_GCM,data:sGWbI1SjTmikn5MgiMo1D71oT9WssJlUOpUuGKWpTV9h1S8CpPGGug7YHul29wqSWF1pZk40qBpCXZ/Ab2pgkRdehCbis/Bx0MjMQZc3iT1svBlRt0jjuPLqkwXD6R8pjc9aqahn41Sl2lqO4EDLH8TpO+JsqWyQ8BFqPTVTSiBrxHfFKjCb+TH6H0hD48OVG65AOSD2CvX/JjSZe7jYam4JBMhsPQ1VVB8O9KwIUl4OADSlTuJwQIxSIZHQIVDcvhrElLMozOc1JQ8FrrW48zrnOIBEMHxbaEz6QpNFnM95Mfic3W4G46PE7W89VR4TZNQ6HyeJJplxa8Z+YHRCE7VJFMhSfzUTZYy9jDlF/bDqFteINBF+zSZ1IdWT3C4nKieTvsImvF31YG3kWlia9Q==,iv:B1iQcQ3RC62dzBMCcY3B4mNg4lCeEcZljgMG/RW8O/Y=,tag:Iqwz72NVXSmkopzWSu6T2A==,type:str] + k8saggregator: + crt: ENC[AES256_GCM,data:Yayva4S55WTYF+tGBkNfBsIi5+PqVM7Se7DXBvgDy7IrbGj2Sy+rxIjSO4lRS2M7nnsnTc0wIp7bwqFbgTHN/LqLAdVz3ZqPfZkSaeFCuJuQ4ZNM3qY0DmEiCHyu9EOBA1Yfe4F7v+Y9c+xKUK7SVVUMvBBvcK6FIQw2Ho2Y5XoJvS/Zop8dqr03OSxg6bjkKYt49XPU/EItH8Xyas1EWYZ4ciYSa4Mmz1/xu1L7vTicztI/sBlf0eHujPU26PPaQgxLQRr+Bk4oWFvqjR6ZRXUFGDicE2cM+wzKo2O95jG2ku0AW24pbbsQ9WRZX4SZ6G76Q1xEq34GgVvyj1ZvywMJh+wLZ3JGtu+cfRFxjba6vprOeAd4VuX/TOQroJoPB9MyIOb4ZcIfeGWllD6Myv3hUUh4dAof7CHhAQxGkTEjq07YYKQzQ5QlOvdgh69owvh5G44ug4+uAXbqVhX2x1aJRM+94JytKuahwgvxvpfOOWMWjBC0YzXQbAvQy320hjLt9k/lwiHI5MZwIfLsXLegzXhTNMHb7Xm2zrHcPMdpe1vovXDPDgJCbY7O+4bvTvO6xwDKYDS9SVh+tYE/aCaWaWj/4vYIlFpc8CPnKLsyLn9DNbtF5Gwj2k9TY/RWg9hTcuR0cGDhHqET3B4NBv4SDJXLPlg6v0DYfstGWh1NT6PG8DbxHc4q47WQoNmip/Q8A3asC9tJba0k24zBz1ubPrlawe5XxY9SbZUkdup5f1Eiay1CkcFPplEKpwQcj+ph4/0V/tCs0nm8qEQAner7lOO7RHve2H14zu+R6plNiKiYHGs+Nu9eEmbIXmoN/e7Q/HhpOAnYmMn7V00UHZjCm/5jDLdKONbt3FLHtFnDZbQ+epfNZRZGVTvTKoYWMJJXHozbbdDVgfHLKoWfONRa5ShyXeesYQiDwOEiuyzAMt6+CPNLJ9OrTcFOoMA9,iv:d8d65vq2XXeegEfN0eItNt764tXTbuNw06vNsjf5rcA=,tag:l7Ex5rtA5PPXf0DLbQQw0w==,type:str] + key: 
ENC[AES256_GCM,data:C9SsEWtBg1mnfW/bWjp7417VWmTlU2CggHDjutDLTamJrsecWREGlNfB+ZwyTAZb1YaXUNXuynMwBWW+MIsOFRTFe+FlfpflEROLrqYfP9lH+6gldVqnkixxocP1TISNMnjVAeUgZap9WOHs2rYV6xYuY2X3H9CWZ2ZaY1rYRvmIx+VWxa7VEuz5NPdB4s+ri8JFj/bFE0e26sHQdCgid0nqKobVk8AlvxkwxOYH8u392KNXwZd7d1oonB5t3nEXyXO0SZ1iiBeSVHjZ6xEOL5/26braJVYd6dius6zD6tL0vffBAAbGACFqRpn+sfo7J/ltiEJN4UDiQT7Pko6anHQFqFm86NilS0krMnP/y1S2u8l4WNWddQzJ917yODnkF6iDYycjw/5Ra2r9crkMEA==,iv:MfIk7+p66fyq8weFMQpOPpRkkxTwxRzb3SjbRwhvMdw=,tag:rtpIt2wtXXLp/RvYS2mkNw==,type:str] + k8sserviceaccount: + key: ENC[AES256_GCM,data:j930E4vkazSLg9dGEjw+5V5sWTHCf4QEm7knukRRPLSFRZCfIx9NaF1bebRA0PeM66PUWbf+HBb1X7C//Q1BpNSAylG1fYg5hsnrTp7s3fDKUkcoCd6LgSpHUDkFV6ki3RCCI4+xYPKBbrNza+bUaoR3Iw26JaVpn98D4S07IFhebQYzTTAFJbzqrZbmHgclL7cjgdU2oBIwYKlFyIzO1yRpx27iUDD94XeoNa4mLTIeWrhXr3BoQc+MRHwuYN4q0oPO+XbUejmEMi8Y8pTJYOGqqg9OWNxdon1eAqz/knUzjGR5u2ATlMFUyNH9JQqps7Wv76v5HaVpGzHs2eE4kwCTTd7E60kd73REwDWpu9/kLfUyBbGBlo0acarCl3PRBcVyYgmWEm9JYH9PpJCYe2OHSITkWUXazzKna12Hbr+VXjvmv+Pgs6wKaQjNmIPiDxBmT7AxyWGJpv6bqz8rg67FgdPRaYLJvkKnKz3mn3xcLvTMqadL13YdIc0vxjz17xWxxgzVtFlBYic5e77qy23h0mRg5unmxwWLOKVJoxf2+zRDcvNrN5uvdLVRSVbvN9DLM24Gaa3VvCHVhm4y5I0KQbUJxg5PTNXSNdcDANwFtrwbfUAtkYwzw0p9kc+OGTK5QMNMHJtL2go1Qftuh1/EEkK+qj7mZS9fkNjGisnNxEXAB1WC6VWBMjbEZ1RleSCvSOMerkk93W2U7t69tPeLwKZHHR6wdexyjN4P/HvM/NOpisza65Y2P1Lyb9WG6tm8+mhl+kradcdcD0cSPGlQBAWJXOARW3wSxdpYa+3Slt5r8l+bT5oT1FX6kGZZfEzr++KFUlQSwAYJulkKXUdse+CO6k2Nt7pySGpupadgYRzabCxwTh3AvVWmg9Ap//2povvypx33TLSr/zaeanIb3jYuRkgMcWIQ3irQjCTVroaBYtNqUD0oCgw/O+CHJzb1GX8Ulds8Aqc8RdL0I9jO75LnIq/dDSeWA8Z+6feW8vqS9CMDzRnWQV3XNjBAZq4i/7qFxLUwnkgwRPB8Em0IH7GMs009MlwKXHwItOxDVLlnCkzakPQPYf5Lwkoo8Qzf9CteeY9erhGlvHE5mj5+NLkoh/EylMCNrK40aldfHn0IRRLTrpp7pMoY/BshUQloW9fanDYWUflMnd9Vp9MU2DPiEbjFIAHrsDDk7/UAkHTjKd2l3x4NBBMAlBkfh4znFA5/BR6ahkSeas68Tu31ZgeVXY6IJOfnvt1iCikgw5sGe4vMvZyA/rkkZZCrTEij/WYlzhwvHOVTVnlm+bk6xv6E2Uvn6nWFJCp1XIYnPP14Z3ABG5kCXVjVJHREraKEhFXqMUqkpCT7CE4aH7EQe53pkOymKx+JCPOgum1wzLZKjVsJKc9jl5FMcV8ycHGHFzLSynGApYAYmYkzZbmVKMi2YR73StYEc5hKySNej10al5jxkb5MoYw0LqOa2ojvkPuR7UDPi+0G1qltsIYldfQ5MZfupXKCfbJ4HbHMP9BnFgTv6rTFUqjuk+lFg3mCdUEsQTERqi7PoBuaq70lyd5W2IOxZqrF8SVD6Cza+It1yxlWTiq6Lgo3tMOiyTKGvkuUf/A52y8VFrOOUUZySNpbqF8M/ru84eXiVzG8tDnNiDy1zSdDVChY80wZd0H9J+RSR8ODfF+JZQt4GEgmsU5cBn9zuftddc4RimI0tdhtorxA/xqfRKiZ2hpGyU6ULyEb6TpK3dkK8UEkSf71U2Ttr/lMdkerEteZ2eUlDY5fkiE59gN8oAZohV1KCTCpDG/ZPOSsyDkH+IJAmabPJGnTSsYQI3G69KdTUSu7AS9cIP0H3BNe8cHVujAB/DKQ6RW8bjVI/7f6cotDo4e2pxJ1wq0Tx2uvNBYLi1YUklobeDQkc/SsQiZDlL2BVBBMmMBNIgqVcEKFxF9V6tugrwEoEIYlN1aoJTypoq0bRbyZ8eCaf2ilmYzoFQxXhqM3mkOyohqsKqLLZvAq0hxvSw64pW6SN0Bnw9kJKeZw578XfnIKVzzHVQCyAfhAoy708sp7udVXkI63v+/tc4XY2LAzedFkJlp1Prp+WgRCSH/Cn6oGZsl6fdR6V2aIvoviyQEZnmWOyYVCtIZC/2GMk8lAmsXYw0DbK0X+JA9pRl6e7Bf+fmKHhwFoLVh27+SZi9JRByOk5/O4XKtSlfq6c2AFp4iYjdYpMXMmeBGZmuZEAkXZmX7NscBZ3obxHWcgM8zAV+N4q6Hc24tAaJ7pXkmAQWgxCDV8LJPdbOgn3wG/GKEHKBdpLVYV8NZsMPwaam9vRx93Uo79AhsS7+A12zw8Ko6lCnXlD3GksvhNbkx+mHEANkn7Hgz1PAPAYBLkRluOKDi40q1QvvJJg01evFNkQouPieQrpa20wqzC4rPVcb2f+GqaO+Tu85ycrcUdfZXwWMRGIcCNQEwIVchBeKeMzSDL3fEJc2sPi9KxCEg216PE75DZZJDBGT/tVRHbrQfLQtcRNMevoF2aVrAWmOX7enRhvs5RS1I48j38sNzylS0y4Ae8amUkj8TIzG4HYyRnxZm88cIUVKau4kSEmAL/FCQYfoQeHOyrk2w44JpUOe83NHpeFSXdMbrmc63jKYF0jIBciozBurAgdb1bjF/6KRejt/rhR7XN5iSt5ER83dAZJ061LMQ284wsQRmeFbFDzEUtkUgOKkj/ZYq6Cs3QKQBzcyXHslLQeb7HzT66ZjA3yiPmKEHnntnWatQMleKgRGsdG33x0qCBZBZ6LEYxsnkGnOyuOZUN2HilNXgonqI+DigjCxuZgTCQB4+7gD6VFaRs1AcjAplSHo8it/YJNHCmTa6gXShH2c7gc1ACpoxO4c2IaxD1Us0h2uKNdwkWsMv2l2DO8+I3chCSX8EuGm1RYwlJlzXlnEqIBkSJnbEVYipD+X01NPwSyg3YqX1NGd+8qi+ZZXn6UmItIJT2fXe3VwmB/VXCBOwJbvA
aJfrSQUN0lCe34tQ5qF7Rfh9P74TlcNZtt0xMTvUIp91Ohe2BK3CHa6MmoiE5hVvJSkOb/uCv/s3BeldKeDxFvMJpQ77aE0P0fC+CByavhV3PL9JYwAx8dySgRb7giPzxceR187tEtpNWm89dE204MbkedGyBDxMUn4DWD/7UHlprquo62f5JvYNY46nJP6YVN1W6q+80QAvBtSYpdD8MGR6g9KRXRz4WaQaxoAakJ9t/fPyZILJROEzJixco70eSXFjLNyVCtVtqSveIdyIT3dc8zqv+FJCaquFJ2G08D9kytCUJRkCcvFxXJ3pUyXawzS94qdtCsmuyM/KSyEJF5nYRmnYxJvxWhVnJnqZdTIxlwFUnPuhk5X/sNogEHFqlJjTPb0Kb4CdqSpjo0QzZ35HuekRMeAFDymzMpQu9Lxco1913Ql3ev8COCeDY5J9hSWvVLDVZjjahWXiabzbsOr+69HtxIIyyRs9byUdhPeE3+L5v9O/vo4Tj8sHihY92Md5s6G96qRFgJ6g0zNs2jSpr8tP/CTUkdHaw013kTsjkwOx+uc/kciLXdYcg1DjwKK3i9aObHE+YPVtMEA4vLTvAE0d5Q53RRxKG9qWgGCDPau8VX6Oqfzdq1ZWy6wFuxQvqjx50VReAyE4wPzTXJ1eC004f8VUsHet9t+Xds352IYvzPIkn1071Hvnd0fCeXeOtfse9tKOHoc8mEOs9YfFWq/4NG0hce8AjyKAlc9SkEwgAi4whJAJV/s3Vzu9n8JIIblMxjzpcR1BP97qEgODC3Rcyv4pNu5izKVtvxlyILNdBf8HVyOxZGxO8u6Y137bisqApe7crbYvlC5FTyO6EjHJzSPMTl2O4M8s25BrksQCKLG5Tvr6eE1zid8goaPNK3z956gzZIFPJ2z3xRhk2jV03gfF8G7L8jlGrpa9b5HKgMg/9LgUQq815dSJGMK5MhA2kQQoEwWReZtMEdX7E0OJgbJPbB07ATsrEkd3O5zuvWlBmWC1LSsoMLzr87+Jb9zy8wYv5yh2oM8SlmL2wwpFvxDrUNLiltQxlm6kitJJz0Ckcw7hLtJOK2AzvGkryGifDdvEHqphV7J96SaGP8rT7rY7niX3NuGYqaBwg2PlPs4YuPrWcKaZNUEZprH3+uvczvV7v1XD0LtSMfx5EtZcB90QpGW8QCMDxKICWQPjknY1t4ibcY0yph51nz3MA8k33LHirMYHGyHrZnrM9DDScyqqQ0a6jitnd1PdshtwhliCZvFDTVro127KGxeOOAZv9DBmi2By9IMSvuDDuWDf3sHXethcP9SMoeBQ7XCjlrO+eMNzRV6PPwTJEg6+JHCiVgGSkfSXeTcOxSCfQU/liUExjKvFUtX/R3OhP9zbVjT4hXGULyC1JGBlJbGxzuM/MBx6Qoju4j8Xt/uVvU1KMoDz9ik6J48X4AR8iPgTsvNhKkrCuZ9dQXf3+bxDNToj/wqubZNeJ4FDhiZNcMibeyFmwpanqiC5SoryD73JVoF+N51eg83Beksj6oO0fuZjL/dMOTVFeUWI3/t5gXvxMLqYWwo3PYwhDgcy0vbBpf7wd77YQgF1iuCevGOItnFXnpBHEHC3rYVNvCAX7RDKms+CeTHwiCornRiWvLmnQ1zMgYN8RT+mfKqVv3gUMv8XMvEbOY7PSG8T4uMzyewzvXawkGojF3rKau9gLGwwpo7uclUq927TAYkT4C4n0aUe7LFrlKrillO+sTAHxXbmKaY+rwteHKO1Bbwoxvak/1aVfUmE+bovynsHDJnUbs+bnLVk9zUYA14SST/I28c/0cNgayTnbtImoqTZNtaES0kyIxJZZpEGXKvZ/ODpkTxJKSJCSDLKULMaXgUSxvk+bJ8HNyXXRtGORgbaoYeXG4a8z/PpDZiV/WKYB7rfn4uGy5ieJTHMqGpNS5hDz7UJwFPJwgU9R6nS2H9wrBhv7f5Z/E322MFT3BbIKYZ6mHoBjkqF18RfAoFryEkvzY5fuPkQAF1VXqvS9j5UL+SuPsWkKOGRlpwhcsws0kWj80Pl5G8BkwPKvjTX847mrfXyZcspivmcriTtTAYI8jRZ40o1O/XsuEB0Gw7fHTmjWiekToYT5XCWSxhrUJPKP9Rom/H2eCWXp4NIfm/O1t3jBElsPzv8yEnlgGcYIpQn/JYt7bGggPoh5pNLAyfKXKzkW69cRZjnTeUXhUlBHz0Odg9YFYiPUwh8bkxlcLRvNK+PYiRDjcjlr0vmrAH0pB4UorgJL3KAt7ZQsNErAGr9j49pMY9LdrfesJ4OcnSEneHrl4uxhX52M6gHlPDu2eVp+yl4E5McoALP1okwN6qhCGKqeUrBQ2uIp9DOixQzd51RkOh/aGj4/bCv5Zr+1IPG0+nULeE8hFaruCqVrC4lN3PkKK1BZSMHRjA3Pm4nHAHN/5K5lhtx26y38MhNkhBLPNjSkxM7MrZyl6+hZ7draewJm5U8Mx9hVCB9sySvlFInggY/G8KjdHQVyfNTq471djbU0vnrHwWz7YxwxeM3axFKwDr9hgGuSVdoeEEzFREQyTKmxB6pfksfuIZ/ejECU7Sr0hMSF0q6pq/kKkj5HIDw38KlaLjxwojr5pIGUA9pWo1sD2Mm8f5okmD5J3Gd2VL3AG72y1ZyzLVq/7UAicLrc3DTADN5sU+Pw9QFN4VdTHO7hEKtohQMOnooi8T8k+qLVseOP8cReBofqF6Xfeq3qdK4R9wLEshN8pigqOa0BTCgmIHDlZX9hVQ==,iv:kXHxGNjTXMS11sRK2XYVmClxlflUGtFKTSYjeEmLeIU=,tag:mXSvm/LLFQkeYLfh8BNlBQ==,type:str] + os: + crt: 
ENC[AES256_GCM,data:itxTjHaFxmzH+vfALnZaF08xtYca/tVNc5fbHuSSCK1hLiwmGfoefoiG15yurvrUdizYlK5w3086rmvoOUlUw1n0ecywavvGxTKZu0sOgTuOYKBakpTr+gux/le4RmCWbGnboMyDmnWBKJefbyp32jk6tuQIMonkOCBW6/D5urg8uZA4zZh/fT994jfQAFSoWtIDzFOGIkEG8SipeMGI4V9JDlXAgpGdmj3DBO/uaZLkMA5uRd27KlrFFNpVhZuTyKfE9jYoF1m4pOEL3B02E1agN0puk/r4MCopuQ7CXwe3YX6b40YifMKUvC5QkKnSH6ZWjj8XJnsiwNUSHJoogicqtAklRDj6Y67n51U+ljzfUmKt12Ece8qWj8JpyptbrRR8GCzX9O+9X0RR3CVRJAXiKVWbMRkHUW7HJVFWFx3Z+TjJ21IeOdmy6otibyQauQxWUCVJl7JOmFuEC/XlJkQL0JdLXpkYHbTromgRz1e4NKDNcFtwGuNlhHHIq+cyyRtp6ksBY4rG4433QlDYtiIsrNBr8bzZr8Hm4x3ylYRbtocCPuINqK+FOVfcrTszr+U2J/PbBe0G3nz6KNHfn5ho8m1DbsZm3msnoj5IIFfy6tefz+yL0LUbjOEeAMb1gf+qXSXNOxZDTH1AoSPXr5nRJxep9b32e2lxl9IpOAXDQ4JZe0uA/HW0fnanEnYcOHAVh1QRjPdq9eZl3cUMKhtRiYcy4YGHQgjrI07A+0S44eink4HaknVaDIHrz1r1jHjmiApcRGeDrWzcXoGG4WJ3uRTspIyfbTVQW9M5Z4H7WCPz0fX1K+JKJsrk+ot6oG/zZpRvAtO1dJyL30vP9zPJ5iPCzDMdFJ6I6/0doqHUXGws,iv:iuGQpiqfImaiABselgCSbRYUY6x2PYsJoXlbPndG6FA=,tag:FTXzPeL2tvfECJawCdsfCQ==,type:str] + key: ENC[AES256_GCM,data:pDSqpBWMaAR05pukZllr0UfunEEp1p+zGXTWNLbunrYlWc+1BT/4uLsk65iy5OXM/o3c+MJCyUqxyNBr+Z0fAoiqM8Qy8HPq5WZC/WWXtR4SZcBnmTHd7RwiGvIepw3omjeJzJ3hJ7Y4/8K+EmSftdXPOusGA0/36AAAkeH/Fd/NxPhBFgFc9p/oCfyed/oSiJkXelH5CPO6a8qTNgb0znKk+CDxZUOiffEfpQaYT/AwMiMP,iv:INTrpATj8TeA6BXx0nH/lXUQw21TSQnCI2pbNrfRlcY=,tag:qtR7mo/60iI48lOQfJXEsw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxaS9kQ0Y1Q1l5WktMZllK + WXo0U0I2UytlTFMyVnVrN0hBV2xUeG1raW1zCkZCODNBS0w1SUpqaUtlS3NPMnEw + UnkwOTJQTkt0aUhFanlNWjI4VHFEZWcKLS0tIDVRZ0I2Y0plRU9NRkJwYnJyUjFa + UTFrN1VETHBtV1NjK2xVQzZxMHVaZDQKPoDlgLPOoDAu1bCAbQnBo2i7u8v/fV4O + 6QNDDn4RDxt1kGDNuvCeXkWtnIP1Vcw/0Z8DNlqgOGgjY+oLsKY7fA== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-05-24T14:59:54Z" + mac: ENC[AES256_GCM,data:K+gdYNcbMQs3/IZuoziE4XOpuAro7eQdvv25S5/nI595DZyPW3dvH0G9IZObvzJWCwoYI6mVbI1jvidA3lhFbD1n9vyGFmOh15A80ll1QX+jjKEJFqN1230jwZaosaaZ8Eqb1gnjt+0wvTS/PpuwVP4SJL7iTY8p0aGQYK5YAa4=,iv:1f+fgn+Uq2+cM7h4TwZl74+AlmYpBTSyk/1J0gE8lGU=,tag:n+9uKWqvjIG4yCLNOHUCEw==,type:str] + pgp: [] + unencrypted_suffix: _unencrypted + version: 3.8.1 diff --git a/kubernetes/flux/apps.yaml b/kubernetes/flux/apps.yaml new file mode 100644 index 00000000..6d260916 --- /dev/null +++ b/kubernetes/flux/apps.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: cluster-settings-user + optional: true + - kind: Secret + name: cluster-secrets-user + optional: true + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: cluster-settings-user + optional: true + - kind: Secret + name: 
cluster-secrets-user + optional: true + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/kubernetes/flux/config/cluster.yaml b/kubernetes/flux/config/cluster.yaml new file mode 100644 index 00000000..0c6f3785 --- /dev/null +++ b/kubernetes/flux/config/cluster.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/gitrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + url: "https://github.com/MaksimShakavin/flux-homelab.git" + ref: + branch: "main" + ignore: | + # exclude all + /* + # include kubernetes directory + !/kubernetes +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets diff --git a/kubernetes/flux/config/flux.yaml b/kubernetes/flux/config/flux.yaml new file mode 100644 index 00000000..f714a906 --- /dev/null +++ b/kubernetes/flux/config/flux.yaml @@ -0,0 +1,88 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.3.0 +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + # Remove the network policies that do not work with k3s + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Increase the number of reconciliations that can be performed in parallel and bump the resource limits + # https://fluxcd.io/flux/cheatsheets/bootstrap/#increase-the-number-of-workers + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=8 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-qps=500 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-burst=1000 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: not-used + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + cpu: 2000m + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # Enable Helm near OOM detection + # 
https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller diff --git a/kubernetes/flux/config/kustomization.yaml b/kubernetes/flux/config/kustomization.yaml new file mode 100644 index 00000000..762bc44e --- /dev/null +++ b/kubernetes/flux/config/kustomization.yaml @@ -0,0 +1,6 @@ +--- + +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/kubernetes/flux/repositories/git/kustomization.yaml b/kubernetes/flux/repositories/git/kustomization.yaml new file mode 100644 index 00000000..8fb7c142 --- /dev/null +++ b/kubernetes/flux/repositories/git/kustomization.yaml @@ -0,0 +1,5 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git a/kubernetes/flux/repositories/helm/actions-runner-controller.yaml b/kubernetes/flux/repositories/helm/actions-runner-controller.yaml new file mode 100644 index 00000000..54fa67be --- /dev/null +++ b/kubernetes/flux/repositories/helm/actions-runner-controller.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: actions-runner-controller + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/actions/actions-runner-controller-charts diff --git a/kubernetes/flux/repositories/helm/backube.yaml b/kubernetes/flux/repositories/helm/backube.yaml new file mode 100644 index 00000000..acdca6dc --- /dev/null +++ b/kubernetes/flux/repositories/helm/backube.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: backube + namespace: flux-system +spec: + interval: 1h + url: https://backube.github.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/bjw-s.yaml b/kubernetes/flux/repositories/helm/bjw-s.yaml new file mode 100644 index 00000000..c32ccd8d --- /dev/null +++ b/kubernetes/flux/repositories/helm/bjw-s.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/bjw-s/helm diff --git a/kubernetes/flux/repositories/helm/cilium.yaml b/kubernetes/flux/repositories/helm/cilium.yaml new file mode 100644 index 00000000..d6736ba4 --- /dev/null +++ b/kubernetes/flux/repositories/helm/cilium.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 1h + url: https://helm.cilium.io diff --git a/kubernetes/flux/repositories/helm/cloudnative-pg.yaml b/kubernetes/flux/repositories/helm/cloudnative-pg.yaml new file mode 100644 index 
00000000..4b2f0e61 --- /dev/null +++ b/kubernetes/flux/repositories/helm/cloudnative-pg.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cloudnative-pg + namespace: flux-system +spec: + interval: 2h + url: https://cloudnative-pg.github.io/charts diff --git a/kubernetes/flux/repositories/helm/coredns.yaml b/kubernetes/flux/repositories/helm/coredns.yaml new file mode 100644 index 00000000..ed0bb65a --- /dev/null +++ b/kubernetes/flux/repositories/helm/coredns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: coredns + namespace: flux-system +spec: + interval: 2h + url: https://coredns.github.io/helm diff --git a/kubernetes/flux/repositories/helm/democratic-csi.yaml b/kubernetes/flux/repositories/helm/democratic-csi.yaml new file mode 100644 index 00000000..a7fdc024 --- /dev/null +++ b/kubernetes/flux/repositories/helm/democratic-csi.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: democratic-csi + namespace: flux-system +spec: + interval: 2h + url: https://democratic-csi.github.io/charts/ diff --git a/kubernetes/flux/repositories/helm/external-dns.yaml b/kubernetes/flux/repositories/helm/external-dns.yaml new file mode 100644 index 00000000..f38c48ad --- /dev/null +++ b/kubernetes/flux/repositories/helm/external-dns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: external-dns + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/external-dns diff --git a/kubernetes/flux/repositories/helm/external-secrets.yaml b/kubernetes/flux/repositories/helm/external-secrets.yaml new file mode 100644 index 00000000..2acd768a --- /dev/null +++ b/kubernetes/flux/repositories/helm/external-secrets.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: external-secrets + namespace: flux-system +spec: + interval: 2h + url: https://charts.external-secrets.io diff --git a/kubernetes/flux/repositories/helm/grafana.yaml b/kubernetes/flux/repositories/helm/grafana.yaml new file mode 100644 index 00000000..eb1a6fb0 --- /dev/null +++ b/kubernetes/flux/repositories/helm/grafana.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: grafana + namespace: flux-system +spec: + interval: 2h + url: https://grafana.github.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/flux/repositories/helm/ingress-nginx.yaml new file mode 100644 index 00000000..492d9cdf --- /dev/null +++ b/kubernetes/flux/repositories/helm/ingress-nginx.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes.github.io/ingress-nginx diff --git a/kubernetes/flux/repositories/helm/intel.yaml b/kubernetes/flux/repositories/helm/intel.yaml new file mode 100644 index 00000000..fb2c66b0 --- /dev/null +++ b/kubernetes/flux/repositories/helm/intel.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: intel + namespace: flux-system +spec: + interval: 2h + url: https://intel.github.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/jetstack.yaml b/kubernetes/flux/repositories/helm/jetstack.yaml new file mode 100644 index 00000000..b513441b --- /dev/null +++ b/kubernetes/flux/repositories/helm/jetstack.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 1h + url: https://charts.jetstack.io diff --git a/kubernetes/flux/repositories/helm/k8s-gateway.yaml b/kubernetes/flux/repositories/helm/k8s-gateway.yaml new file mode 100644 index 00000000..428b19f9 --- /dev/null +++ b/kubernetes/flux/repositories/helm/k8s-gateway.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: k8s-gateway + namespace: flux-system +spec: + interval: 1h + url: https://ori-edge.github.io/k8s_gateway diff --git a/kubernetes/flux/repositories/helm/kustomization.yaml b/kubernetes/flux/repositories/helm/kustomization.yaml new file mode 100644 index 00000000..ce911ede --- /dev/null +++ b/kubernetes/flux/repositories/helm/kustomization.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./bjw-s.yaml + - ./cilium.yaml + - ./external-dns.yaml + - ./ingress-nginx.yaml + - ./k8s-gateway.yaml + - ./prometheus-community.yaml + - ./stevehipwell.yaml + - ./grafana.yaml + - ./portainer-charts.yaml + - ./external-secrets.yaml + - ./jetstack.yaml + - ./metrics-server.yaml + - ./stakater.yaml + - ./cloudnative-pg.yaml + - ./longhorn.yaml + - ./backube.yaml + - ./piraeus.yaml + - ./democratic-csi.yaml + - ./node-feature-discovery.yaml + - ./intel.yaml + - ./actions-runner-controller.yaml + - ./spegel.yaml + - ./coredns.yaml + - ./postfinance.yaml diff --git a/kubernetes/flux/repositories/helm/longhorn.yaml b/kubernetes/flux/repositories/helm/longhorn.yaml new file mode 100644 index 00000000..c0abf7f0 --- /dev/null +++ b/kubernetes/flux/repositories/helm/longhorn.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: longhorn + namespace: flux-system +spec: + interval: 1h + url: https://charts.longhorn.io diff --git a/kubernetes/flux/repositories/helm/metrics-server.yaml 
b/kubernetes/flux/repositories/helm/metrics-server.yaml new file mode 100644 index 00000000..62a6473b --- /dev/null +++ b/kubernetes/flux/repositories/helm/metrics-server.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: metrics-server + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/metrics-server diff --git a/kubernetes/flux/repositories/helm/node-feature-discovery.yaml b/kubernetes/flux/repositories/helm/node-feature-discovery.yaml new file mode 100644 index 00000000..5e45d5a8 --- /dev/null +++ b/kubernetes/flux/repositories/helm/node-feature-discovery.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: node-feature-discovery + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/node-feature-discovery/charts diff --git a/kubernetes/flux/repositories/helm/piraeus.yaml b/kubernetes/flux/repositories/helm/piraeus.yaml new file mode 100644 index 00000000..ebd0fa59 --- /dev/null +++ b/kubernetes/flux/repositories/helm/piraeus.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: piraeus + namespace: flux-system +spec: + interval: 1h + url: https://piraeus.io/helm-charts diff --git a/kubernetes/flux/repositories/helm/portainer-charts.yaml b/kubernetes/flux/repositories/helm/portainer-charts.yaml new file mode 100644 index 00000000..b7cc33e4 --- /dev/null +++ b/kubernetes/flux/repositories/helm/portainer-charts.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: portainer-charts + namespace: flux-system +spec: + interval: 1h + url: https://portainer.github.io/k8s/ diff --git a/kubernetes/flux/repositories/helm/postfinance.yaml b/kubernetes/flux/repositories/helm/postfinance.yaml new file mode 100644 index 00000000..015568bf --- /dev/null +++ b/kubernetes/flux/repositories/helm/postfinance.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 2h + url: https://postfinance.github.io/kubelet-csr-approver diff --git a/kubernetes/flux/repositories/helm/prometheus-community.yaml b/kubernetes/flux/repositories/helm/prometheus-community.yaml new file mode 100644 index 00000000..78c4f0c0 --- /dev/null +++ b/kubernetes/flux/repositories/helm/prometheus-community.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: prometheus-community + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/prometheus-community/charts diff --git a/kubernetes/flux/repositories/helm/spegel.yaml b/kubernetes/flux/repositories/helm/spegel.yaml new 
file mode 100644 index 00000000..0350b3ad --- /dev/null +++ b/kubernetes/flux/repositories/helm/spegel.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: spegel + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/spegel-org/helm-charts diff --git a/kubernetes/flux/repositories/helm/stakater.yaml b/kubernetes/flux/repositories/helm/stakater.yaml new file mode 100644 index 00000000..bcc3304b --- /dev/null +++ b/kubernetes/flux/repositories/helm/stakater.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + interval: 1h + url: https://stakater.github.io/stakater-charts diff --git a/kubernetes/flux/repositories/helm/stevehipwell.yaml b/kubernetes/flux/repositories/helm/stevehipwell.yaml new file mode 100644 index 00000000..832684b7 --- /dev/null +++ b/kubernetes/flux/repositories/helm/stevehipwell.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: stevehipwell + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/stevehipwell/helm-charts diff --git a/kubernetes/flux/repositories/kustomization.yaml b/kubernetes/flux/repositories/kustomization.yaml new file mode 100644 index 00000000..ae7e0ad4 --- /dev/null +++ b/kubernetes/flux/repositories/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./git + - ./helm + - ./oci diff --git a/kubernetes/flux/repositories/oci/kustomization.yaml b/kubernetes/flux/repositories/oci/kustomization.yaml new file mode 100644 index 00000000..8fb7c142 --- /dev/null +++ b/kubernetes/flux/repositories/oci/kustomization.yaml @@ -0,0 +1,5 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git a/kubernetes/flux/vars/cluster-secrets.sops.yaml b/kubernetes/flux/vars/cluster-secrets.sops.yaml new file mode 100644 index 00000000..4bf6f7eb --- /dev/null +++ b/kubernetes/flux/vars/cluster-secrets.sops.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + SECRET_EXAMPLE: ENC[AES256_GCM,data:9YUm8BOmLfWPmPSnFfv4yqQ8whU4TaZbTQBo3wRspApYvWNqmroZ3EN6pGHcBLxDHGR2JhQ8SrDNps0dZwOJfk/a5Me2GRoDsNalx5zGF8bcHtNoPRrG1dNIw0heuw==,iv:QG3FEJh4TBSCo/fObNQqqGYWUKrXeNlwWcUaGXo0hxU=,tag:lSPMvF/VtrLxgf8mCQEvQw==,type:str] + SECRET_DOMAIN: ENC[AES256_GCM,data:wIYcdMzbnNTbcV2LJA==,iv:0Uv+rP/oyYciSkNb7RQ5mF4iqHPVP1uBtT34ZhLowUc=,tag:+XQceahKYlYPKgeTnDiKSg==,type:str] + SECRET_ACME_EMAIL: ENC[AES256_GCM,data:RfWJZgnzIDbI2purDft7jjrrGJhjDdUxwg==,iv:XVTUbnM8CuXEe0UaZ4LadaPnj/pAC2YB+n34lDYC9mw=,tag:WlO3dsmY5SSAG1VQIqWrow==,type:str] + SECRET_CLOUDFLARE_TUNNEL_ID: ENC[AES256_GCM,data:E/e0M+QKytLvZgD9U6egSGwV4qbpKVUV1ZUKaq2bd9S2FoDi,iv:CBzu//vbiSr3sxNd3Wz72o6/csx3AEU9lSN03Fzf4V8=,tag:ESOgRxclJqa/I+j30HwMhg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] 
+ hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0d05DamV1TWNOc0o1aXoz + Q0ltajRxWmEraDlsRE4veFhZdXpqNFZBM2tBCjFPZnhQNGlNQTUxQSt0U3oreFZJ + T1RaSWVzVTNDUWFuOHBUbGNmbFdBSk0KLS0tIG9UNUlSdFd0emRuN2NGVzZiUjZy + VGx5R1lKZU1aejkvUjZDcytvQkczalkKjKsV4X9HnVtQG80TpVctxHdio//g3vJN + oD7AdZ0iGC3Z0W8VD9N7kLYIpB4BvC1QVP2cEVw6YW/9x+M2Y/to0w== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-02-17T21:45:25Z" + mac: ENC[AES256_GCM,data:RUzvexrVSeRFAmzJvLKQkXdFft3erBN9ZOUPgMAl+RJr0cSu/x5EC2EyogxxD4KY9x25HeVI/u986rgovwWkm95vDVV8BT/xWk+x1QgsuUguY2eAY2tJaebgS4CoE607vUj9M7g0EfytoWpAKrmWuGFn+vRVfQfBVntYCHnoJhs=,iv:sIM7jCiYfyfcVR+HsG/jULWrYIXxIktd/hOx/v/3wcM=,tag:Yc/LoFIvemzf8/jJDj7+ow==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/flux/vars/cluster-settings.yaml b/kubernetes/flux/vars/cluster-settings.yaml new file mode 100644 index 00000000..cd25252f --- /dev/null +++ b/kubernetes/flux/vars/cluster-settings.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-settings + namespace: flux-system +data: + TIMEZONE: "Europe/Warsaw" + CLUSTER_CIDR: "10.69.0.0/16" + NODE_CIDR: "192.168.20.0/24" + CLUSTER_LB_QBITTORRENT: "192.168.20.64" + CLUSTER_LB_PLEX: "192.168.20.65" + NAS_URL: "192.168.20.5" + RPI_URL: "192.168.20.3" + NAS_PATH: "/volume1/kubernetes" diff --git a/kubernetes/flux/vars/kustomization.yaml b/kubernetes/flux/vars/kustomization.yaml new file mode 100644 index 00000000..9ea91972 --- /dev/null +++ b/kubernetes/flux/vars/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cluster-settings.yaml + - ./cluster-secrets.sops.yaml diff --git a/kubernetes/talos/clusterconfig/talosconfig b/kubernetes/talos/clusterconfig/talosconfig new file mode 100644 index 00000000..e35e5db5 --- /dev/null +++ b/kubernetes/talos/clusterconfig/talosconfig @@ -0,0 +1,2 @@ +context: "" +contexts: {} diff --git a/kubernetes/templates/gatus/external/configmap.yaml b/kubernetes/templates/gatus/external/configmap.yaml new file mode 100644 index 00000000..34725b2c --- /dev/null +++ b/kubernetes/templates/gatus/external/configmap.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${APP}-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "${APP}" + group: external + url: "https://${GATUS_SUBDOMAIN:-${APP}}.${SECRET_DOMAIN}${GATUS_PATH:-/}" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == ${GATUS_STATUS:-200}" + alerts: + - type: discord diff --git a/kubernetes/templates/gatus/external/kustomization.yaml b/kubernetes/templates/gatus/external/kustomization.yaml new file mode 100644 index 00000000..e09060b9 --- /dev/null +++ b/kubernetes/templates/gatus/external/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./configmap.yaml diff --git a/kubernetes/templates/gatus/internal/configmap.yaml b/kubernetes/templates/gatus/internal/configmap.yaml new file mode 100644 index 00000000..039321aa --- /dev/null +++ b/kubernetes/templates/gatus/internal/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: "${APP}-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "${APP}" + group: internal + url: 1.1.1.1 + interval: 1m + ui: + hide-hostname: true + hide-url: true + dns: + query-name: "${GATUS_SUBDOMAIN:-${APP}}.${SECRET_DOMAIN}" + query-type: A + conditions: + - "len([BODY]) == 0" + alerts: + - type: discord diff --git a/kubernetes/templates/gatus/internal/kustomization.yaml b/kubernetes/templates/gatus/internal/kustomization.yaml new file mode 100644 index 00000000..e09060b9 --- /dev/null +++ b/kubernetes/templates/gatus/internal/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./configmap.yaml diff --git a/kubernetes/templates/volsync/claim.yaml b/kubernetes/templates/volsync/claim.yaml new file mode 100644 index 00000000..d492771d --- /dev/null +++ b/kubernetes/templates/volsync/claim.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "${VOLSYNC_CLAIM:-${APP}}" +spec: + accessModes: ["${VOLSYNC_ACCESSMODES:-ReadWriteOnce}"] + dataSourceRef: + kind: ReplicationDestination + apiGroup: volsync.backube + name: "${APP}-bootstrap" + resources: + requests: + storage: "${VOLSYNC_CAPACITY}" + storageClassName: "${VOLSYNC_STORAGECLASS:-longhorn-snapshot}" diff --git a/kubernetes/templates/volsync/kustomization.yaml b/kubernetes/templates/volsync/kustomization.yaml new file mode 100644 index 00000000..793a6d4a --- /dev/null +++ b/kubernetes/templates/volsync/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./claim.yaml + - ./minio.yaml diff --git a/kubernetes/templates/volsync/minio.yaml b/kubernetes/templates/volsync/minio.yaml new file mode 100644 index 00000000..51262499 --- /dev/null +++ b/kubernetes/templates/volsync/minio.yaml @@ -0,0 +1,51 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/volsync.backube/replicationsource_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: "${APP}" +spec: + sourcePVC: "${CLAIM:-${APP}}" + trigger: + schedule: "0 * * * *" #every hour + restic: + copyMethod: "${VOLSYNC_COPYMETHOD:-Snapshot}" + pruneIntervalDays: 7 + repository: "${APP}-volsync-secret" + volumeSnapshotClassName: "${VOLSYNC_SNAPSHOTCLASS:-longhorn-snapclass}" + cacheCapacity: "${VOLSYNC_CACHE_CAPACITY:-8Gi}" + cacheStorageClassName: "${VOLSYNC_CACHE_SNAPSHOTCLASS:-local-hostpath}" + cacheAccessModes: ["${VOLSYNC_CACHE_ACCESSMODES:-ReadWriteOnce}"] + storageClassName: "${VOLSYNC_STORAGECLASS:-longhorn-snapshot}" + accessModes: ["${VOLSYNC_ACCESSMODES:-ReadWriteOnce}"] + moverSecurityContext: + runAsUser: "${VOLSYNC_UID:-568}" + runAsGroup: "${VOLSYNC_GID:-568}" + fsGroup: "${VOLSYNC_GID:-568}" + retain: + hourly: 24 + daily: 7 + weekly: 5 +--- +# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/volsync.backube/replicationdestination_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: "${APP}-bootstrap" +spec: + trigger: + manual: restore-once + restic: + repository: "${APP}-volsync-secret" + copyMethod: Snapshot # must be Snapshot + volumeSnapshotClassName: "${VOLSYNC_SNAPSHOTCLASS:-longhorn-snapclass}" + cacheStorageClassName: 
"${VOLSYNC_CACHE_SNAPSHOTCLASS:-local-hostpath}" + cacheAccessModes: ["${VOLSYNC_CACHE_ACCESSMODES:-ReadWriteOnce}"] + cacheCapacity: "${VOLSYNC_CACHE_CAPACITY:-8Gi}" + storageClassName: "${VOLSYNC_STORAGECLASS:-longhorn-snapshot}" + accessModes: ["${VOLSYNC_ACCESSMODES:-ReadWriteOnce}"] + capacity: "${VOLSYNC_CAPACITY}" + moverSecurityContext: + runAsUser: "${VOLSYNC_UID:-568}" + runAsGroup: "${VOLSYNC_GID:-568}" + fsGroup: "${VOLSYNC_GID:-568}" diff --git a/kubernetes/templates/volsync/secret.sops.yaml b/kubernetes/templates/volsync/secret.sops.yaml new file mode 100644 index 00000000..4ce6c6eb --- /dev/null +++ b/kubernetes/templates/volsync/secret.sops.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ${APP}-volsync-secret +type: Opaque +stringData: + #ENC[AES256_GCM,data:4r+r5roqSAxfBgtvUL+4PC0J7g==,iv:BEHtl0dVZH12As/cfFApfvXizNIYVQRUHbqXbZSVxwU=,tag:ooSC5NGx0b5kyLu0SeWgRA==,type:comment] + RESTIC_REPOSITORY: ENC[AES256_GCM,data:3QZlzc+3cUZnZyfBU7X2ATXQsSR/FPUOsETtDwgKceI=,iv:Nkzrllwf2TWjUAK7m0G/u9opYaTmGagDyBxo6GMFM3w=,tag:wHtjyqZD6e6sCDHEh+h2HA==,type:str] + #ENC[AES256_GCM,data:dhkzp+nuSvs1YpWlZJzRHrZW06tH1uGaJ87P7zwZ,iv:4Ix8Uuy1+yzCkK6rPIH0VXMiPmJRTOuSaIrUphzhEwQ=,tag:4b/WROlAdcQR5utwXJmKKQ==,type:comment] + RESTIC_PASSWORD: ENC[AES256_GCM,data:v9Sx4UJrTsgzCZ2DXJab7A==,iv:84oVrjzPYDunXYWZdfooCA2/R6YhD6joAHGunrqBxC8=,tag:wHBdRvO+3QMcwkwEl2gNsw==,type:str] + #ENC[AES256_GCM,data:1HuwOiAG75sZBJm/F/UsyKZgU1I/DbGTdrhy+pdLg2MjMlMm7YA1NlU=,iv:S/csA+nGkf2oaYNnxKcD4aNjYlgyBj/zl+p7ty9O7Hs=,tag:txcjP4OFGF70BKMeYJyibQ==,type:comment] + #ENC[AES256_GCM,data:hD66uez8UQJxgVKRJ6XEjdtT4pus4KIGbwWrKbWnQpTouBwi6OXlY/yYmMiSWZocJ3lTuOQkH3g9vwLM+QfjLs6gp/E3uQ==,iv:wzLyjng0HKU1PzHD43BAR/EtgaOu7T7tZnF3Y80YkV8=,tag:MN/nQoO74U8kXAkQEaoTiQ==,type:comment] + AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:SFhY+1NbjfGugBQhASXWTSOSeLk=,iv:PHKsy4xU8Ban68X+n/5Fh9F0vaTIBUXjalpIxaE5tJs=,tag:xe1ma8LZu+K5aDiWEqXd2A==,type:str] + AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:+pbnoHZWLM1SRIxCyt3O7EiB6+RF21NgBymf9mR2NVA6IVeO1XEk7A==,iv:VaN/jhN+zr4/gDkG1SNwBmmHKlP3ZM+CVRYda50LRww=,tag:/yu6fA2748pLN8GK+9iOZg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1k5xl02aujw4rsgghnnd0sdymmwd095w5nqgjvf76warwrdc0uqpqsm2x8m + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBrQVNNUXo1bnVWQW9aWHds + NXNNMUF1QVNxMFlBdUV0ZGtHdWZGMnNrYzNZCktwZmI2OTZrclJlVU5LNlg5bGpl + amc5MEdTb01xNG1UTGVNREE1WmUvRUkKLS0tIHg1eHBaT2RjSDEvaUhTYXB3Rzd3 + YUVOVFdFRFhLYm1MaW5JQXFmYldMYk0KA7BFGNUu7bLkJMJR9BtOPEKuTcVksOOP + sOyKkPQ1feSqEOmr+9iIOQRsPkbOHnUBmodrCt3exgWKGK/et2cpcA== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-02-24T16:16:22Z" + mac: ENC[AES256_GCM,data:AZKPKgTJuYTfjeOi0csjKLUwkMbT9KDFosMJ/hRnPSww4xFFpjmZqEbhtMmipbLM8KmlAjIoGsE9vmKAQjCH1wooLpY/4U6KMqIzF3NY0b6wjtP/+j6Alqxdt6wvbrIXUZ9AuLUYvrJ0ltaci3ZC/sgo8Fj64yi06NuLWHtbS+w=,iv:CTAWc54lslkF61iBxu/mx7dXrIok2aKtZ8BxI3/Ojfs=,tag:DUuCZFY2PtG9szztUYf6uw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..bdc5b50e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +bcrypt==4.1.3 +cloudflare==2.20.0 +email-validator==2.1.1 +makejinja==2.6.0 +netaddr==1.2.1 +passlib==1.7.4 diff --git a/scripts/kubeconform.sh b/scripts/kubeconform.sh new file mode 100755 index 00000000..a69308b1 --- /dev/null +++ b/scripts/kubeconform.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -o errexit +set -o 
pipefail + +KUBERNETES_DIR=$1 + +[[ -z "${KUBERNETES_DIR}" ]] && echo "Kubernetes location not specified" && exit 1 + +kustomize_args=("--load-restrictor=LoadRestrictionsNone") +kustomize_config="kustomization.yaml" +kubeconform_args=( + "-strict" + "-ignore-missing-schemas" + "-skip" + "Secret" + "-schema-location" + "default" + "-schema-location" + "https://kubernetes-schemas.pages.dev/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json" + "-verbose" +) + +echo "=== Validating standalone manifests in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -maxdepth 1 -type f -name '*.yaml' -print0 | while IFS= read -r -d $'\0' file; + do + kubeconform "${kubeconform_args[@]}" "${file}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/apps ===" +find "${KUBERNETES_DIR}/apps" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done diff --git a/talosconfig b/talosconfig new file mode 100644 index 00000000..e35e5db5 --- /dev/null +++ b/talosconfig @@ -0,0 +1,2 @@ +context: "" +contexts: {}
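As a usage sketch for the validation script above (assumptions: kubeconform and kustomize are installed, for example via the devcontainer feature install script, and the repository root is the current directory), scripts/kubeconform.sh takes the kubernetes directory as its single argument:

    # validate standalone Flux manifests and every kustomization under flux/ and apps/
    bash scripts/kubeconform.sh ./kubernetes

Per the script, this checks the top-level manifests in kubernetes/flux, then builds each kustomization.yaml under kubernetes/flux and kubernetes/apps with LoadRestrictionsNone and pipes the rendered output through kubeconform against the configured kubernetes-schemas.pages.dev schema location, skipping Secret resources.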