diff --git a/.github/actions/setup-libgit2/action.yaml b/.github/actions/setup-libgit2/action.yaml new file mode 100644 index 00000000..7028c797 --- /dev/null +++ b/.github/actions/setup-libgit2/action.yaml @@ -0,0 +1,28 @@ +name: 'Setup libgit2' +description: 'Action for the libgit2 setup' + +inputs: + version: + description: 'libgit2 version to checkout' + required: true + default: 'v1.5.2' + +runs: + using: 'composite' + steps: + - name: Install libssh2 + run: | + sudo apt update + sudo apt install libssh2-1-dev -y + shell: bash + + - name: Install libgit2 + run: | + git clone https://github.com/libgit2/libgit2.git + cd libgit2 + git checkout ${{ inputs.version }} + cmake . -DBUILD_TESTS=OFF -DBUILD_CLI=OFF -DUSE_SSH=ON + sudo make install + sudo ldconfig + shell: bash + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..aa1e19da --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + labels: + - "area/dependency" + - "kind/chore" + schedule: + interval: "weekly" + commit-message: + prefix: "gomod" + include: "scope" + ignore: + # ignore minor k8s updates, e.g. 1.27.x -> 1.28.x + - dependency-name: "k8s.io/*" + update-types: ["version-update:semver-minor"] + - dependency-name: "sigs.k8s.io/*" + update-types: ["version-update:semver-minor"] + - dependency-name: "helm.sh/helm/v3" + update-types: ["version-update:semver-minor"] + groups: + k8s-io: + patterns: + - "k8s.io/*" + + - package-ecosystem: "docker" + directory: "/components/operator" + labels: + - "area/dependency" + - "kind/chore" + schedule: + interval: "weekly" + commit-message: + prefix: "operator" + include: "scope" + diff --git a/.github/scripts/create_changelog.sh b/.github/scripts/create_changelog.sh new file mode 100755 index 00000000..ace47064 --- /dev/null +++ b/.github/scripts/create_changelog.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +PREVIOUS_RELEASE=$2 # for testability + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. 
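The strict-mode flags that `create_changelog.sh` enables here (the remaining two follow below) are worth seeing in isolation. This standalone sketch — not part of the PR — shows why `pipefail` matters in scripts like these, which pipe `git log` and `curl` output through `jq`:

```bash
#!/usr/bin/env bash
# Standalone demo of the strict-mode flags used by the release scripts.
set -o errexit

# Without pipefail, a pipeline's exit status is the LAST command's status,
# so the failing producer here is masked by the successful `sort`.
false | sort
echo "still running: the failure above was silently ignored"

set -o pipefail
# With pipefail, the same pipeline now reports failure; the `||` branch
# keeps errexit from killing the demo so it can print a message.
false | sort || echo "pipefail surfaced the producer's failure"
```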
+set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_TAG=$1 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}" +CHANGELOG_FILE="CHANGELOG.md" + +if [ "${PREVIOUS_RELEASE}" == "" ] +then + PREVIOUS_RELEASE=$(git describe --tags --abbrev=0) +fi + +echo "## What has changed" >> ${CHANGELOG_FILE} + +git log ${PREVIOUS_RELEASE}..HEAD --pretty=tformat:"%h" --reverse | while read -r commit +do + COMMIT_AUTHOR=$(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/commits/${commit}" | jq -r '.author.login') + if [ "${COMMIT_AUTHOR}" != "kyma-bot" ]; then + git show -s ${commit} --format="* %s by @${COMMIT_AUTHOR}" >> ${CHANGELOG_FILE} + fi +done + +NEW_CONTRIB=$$.new + +join -v2 \ +<(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/compare/$(git rev-list --max-parents=0 HEAD)...${PREVIOUS_RELEASE}" | jq -r '.commits[].author.login' | sort -u) \ +<(curl -H "${GITHUB_AUTH_HEADER}" -sS "${GITHUB_URL}/compare/${PREVIOUS_RELEASE}...HEAD" | jq -r '.commits[].author.login' | sort -u) >${NEW_CONTRIB} + +if [ -s ${NEW_CONTRIB} ] +then + echo -e "\n## New contributors" >> ${CHANGELOG_FILE} + while read -r user + do + REF_PR=$(grep "@${user}" ${CHANGELOG_FILE} | head -1 | grep -o " (#[0-9]\+)" || true) + if [ -n "${REF_PR}" ] #reference found + then + REF_PR=" in ${REF_PR}" + fi + echo "* @${user} made first contribution${REF_PR}" >> ${CHANGELOG_FILE} + done <${NEW_CONTRIB} +fi + +echo -e "\n**Full changelog**: https://github.com/$REPOSITORY/compare/${PREVIOUS_RELEASE}...${RELEASE_TAG}" >> ${CHANGELOG_FILE} + +# cleanup +rm ${NEW_CONTRIB} || echo "cleaned up" \ No newline at end of file diff --git a/.github/scripts/create_draft_release.sh b/.github/scripts/create_draft_release.sh new file mode 100755 index 00000000..75957632 --- /dev/null +++ b/.github/scripts/create_draft_release.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# This script returns the id of the draft release + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. +set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_TAG=$1 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: Bearer ${GITHUB_TOKEN}" +CHANGELOG_FILE=$(cat CHANGELOG.md) + +JSON_PAYLOAD=$(jq -n \ + --arg tag_name "$RELEASE_TAG" \ + --arg name "$RELEASE_TAG" \ + --arg body "$CHANGELOG_FILE" \ + '{ + "tag_name": $tag_name, + "name": $name, + "body": $body, + "draft": true + }') + +CURL_RESPONSE=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "${GITHUB_AUTH_HEADER}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + ${GITHUB_URL}/releases \ + -d "$JSON_PAYLOAD") + +echo "$(echo $CURL_RESPONSE | jq -r ".id")" diff --git a/.github/scripts/publish_release.sh b/.github/scripts/publish_release.sh new file mode 100755 index 00000000..e56797ab --- /dev/null +++ b/.github/scripts/publish_release.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# This script publishes a draft release + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. 
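The `jq -n` pattern in `create_draft_release.sh` above is what keeps the multi-line changelog safe to embed in JSON. A standalone sketch with assumed values shows the payload it produces:

```bash
# Sketch (assumed tag and body) of the payload construction used by
# create_draft_release.sh; jq -n handles all quoting and newline escaping.
RELEASE_TAG="1.2.3"
BODY=$'## What has changed\n* some commit subject by @contributor'

jq -n \
  --arg tag_name "$RELEASE_TAG" \
  --arg name "$RELEASE_TAG" \
  --arg body "$BODY" \
  '{tag_name: $tag_name, name: $name, body: $body, draft: true}'
```

Printing the payload this way is also a quick sanity check before pointing the script at the real `/releases` endpoint.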
+set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +RELEASE_ID=$1 +IS_LATEST_RELEASE=$2 + +REPOSITORY=${REPOSITORY:-kyma-project/docker-registry} +GITHUB_URL=https://api.github.com/repos/${REPOSITORY} +GITHUB_AUTH_HEADER="Authorization: Bearer ${GITHUB_TOKEN}" + +CURL_RESPONSE=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "${GITHUB_AUTH_HEADER}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + ${GITHUB_URL}/releases/${RELEASE_ID} \ + -d '{"draft": false, "make_latest": '"$IS_LATEST_RELEASE"'}') diff --git a/.github/scripts/release.sh b/.github/scripts/release.sh new file mode 100755 index 00000000..cc583cb5 --- /dev/null +++ b/.github/scripts/release.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# standard bash error handling +set -o nounset # treat unset variables as an error and exit immediately. +set -o errexit # exit immediately when a command fails. +set -E # needs to be set if we want the ERR trap +set -o pipefail # prevents errors in a pipeline from being masked + +# Expected variables: +IMG=${IMG?"Define IMG env"} # operator image +PULL_BASE_REF=${PULL_BASE_REF?"Define PULL_BASE_REF env"} # name of the tag +GITHUB_TOKEN=${GITHUB_TOKEN?"Define GITHUB_TOKEN env"} # github token used to upload the template yaml + +uploadFile() { + filePath=${1} + ghAsset=${2} + + echo "Uploading ${filePath} as ${ghAsset}" + response=$(curl -s -o output.txt -w "%{http_code}" \ + --request POST --data-binary @"$filePath" \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: text/yaml" \ + $ghAsset) + if [[ "$response" != "201" ]]; then + echo "Unable to upload the asset ($filePath): " + echo "HTTP Status: $response" + cat output.txt + exit 1 + else + echo "$filePath uploaded" + fi +} + +echo "IMG: ${IMG}" +IMG=${IMG} make -C components/operator/ render-manifest + +echo "Generated dockerregistry-operator.yaml:" +cat dockerregistry-operator.yaml + +echo "Fetching releases" +CURL_RESPONSE=$(curl -w "%{http_code}" -sL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GITHUB_TOKEN"\ + https://api.github.com/repos/kyma-project/docker-registry/releases) +JSON_RESPONSE=$(sed '$ d' <<< "${CURL_RESPONSE}") +HTTP_CODE=$(tail -n1 <<< "${CURL_RESPONSE}") +if [[ "${HTTP_CODE}" != "200" ]]; then + echo "${CURL_RESPONSE}" + exit 1 +fi + +echo "Finding release id for: ${PULL_BASE_REF}" +RELEASE_ID=$(jq <<< ${JSON_RESPONSE} --arg tag "${PULL_BASE_REF}" '.[] | select(.tag_name == $ARGS.named.tag) | .id') + +echo "Got '${RELEASE_ID}' release id" +if [ -z "${RELEASE_ID}" ] +then + echo "No release with tag = ${PULL_BASE_REF}" + exit 1 +fi + +echo "Updating github release with assets" +UPLOAD_URL="https://uploads.github.com/repos/kyma-project/docker-registry/releases/${RELEASE_ID}/assets" + +uploadFile "dockerregistry-operator.yaml" "${UPLOAD_URL}?name=dockerregistry-operator.yaml" +uploadFile "config/samples/default-dockerregistry-cr.yaml" "${UPLOAD_URL}?name=default-dockerregistry-cr.yaml" diff --git a/.github/scripts/upgrade-sec-scanners-config.sh b/.github/scripts/upgrade-sec-scanners-config.sh new file mode 100755 index 00000000..a9b2fdec --- /dev/null +++ b/.github/scripts/upgrade-sec-scanners-config.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +IMG_VERSION=${IMG_VERSION?"Define IMG_VERSION env"} + +yq eval-all --inplace ' + select(fileIndex == 0).protecode=[ + select(fileIndex == 1) + | .global.containerRegistry.path as $registryPath + | ( + { + "dockerregistry_operator" : { + "name" : 
"dockerregistry-operator", + "directory" : "prod", + "version" : env(IMG_VERSION) + } + } + + .global.images + )[] + | $registryPath + "/" + .directory + "/" + .name + ":" + .version + ] + | select(fileIndex == 0) + ' sec-scanners-config.yaml config/docker-registry/values.yaml \ No newline at end of file diff --git a/.github/scripts/verify-actions-status.sh b/.github/scripts/verify-actions-status.sh new file mode 100755 index 00000000..598000d0 --- /dev/null +++ b/.github/scripts/verify-actions-status.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +echo "Checking status of github actions for docker-registry" + +REF_NAME="${1:-"main"}" +RAW_EXPECTED_SHA=$(git log "${REF_NAME}" --max-count 1 --format=format:%H) +REPOSITORY_ID="563346860" + +STATUS_URL="https://api.github.com/repositories/${REPOSITORY_ID}/actions/workflows/gardener-integration.yaml/runs?head_sha=${RAW_EXPECTED_SHA}" +GET_STATUS_JQ_QUERY=".workflow_runs[0] | \"\(.status)-\(.conclusion)\"" +GET_COUNT_JQ_QUERY=".total_count" + +response=`curl -s ${STATUS_URL}` + +count=`echo $response | jq -r "${GET_COUNT_JQ_QUERY}"` +if [[ "$count" == "0" ]]; then + echo "No actions to verify" +else + fullstatus=`echo $response | jq -r "${GET_STATUS_JQ_QUERY}"` + if [[ "$fullstatus" == "completed-success" ]]; then + echo "All actions succeeded" + else + echo "Actions failed or pending - Check github actions status" + exit 1 + fi +fi diff --git a/.github/scripts/verify-docker-registry-jobs-status.sh b/.github/scripts/verify-docker-registry-jobs-status.sh new file mode 100755 index 00000000..5adf1528 --- /dev/null +++ b/.github/scripts/verify-docker-registry-jobs-status.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +### Verify post-submit prow jobs status +# +# Optional input args: +# - REF_NAME - branch/tag/commit +# Return status: +# - return 0 - if status is "success" +# - return 1 - if status is "failure" or after timeout (~25min) + +# wait until Prow trigger pipelines +sleep 10 + +echo "Checking status of POST Jobs for docker-registry" + +REF_NAME="${1:-"main"}" +STATUS_URL="https://api.github.com/repos/kyma-project/docker-registry/commits/${REF_NAME}/status" + +function verify_github_jobs_status () { + local number=1 + while [[ $number -le 100 ]] ; do + echo ">--> checking docker-registry job status #$number" + local STATUS=`curl -L -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" ${STATUS_URL} | jq -r .state ` + echo "jobs status: ${STATUS:='UNKNOWN'}" + [[ "$STATUS" == "success" ]] && return 0 + [[ "$STATUS" == "failure" ]] && return 1 + sleep 15 + ((number = number + 1)) + done + + exit 1 +} + +verify_github_jobs_status \ No newline at end of file diff --git a/.github/scripts/verify-image-changes.sh b/.github/scripts/verify-image-changes.sh new file mode 100755 index 00000000..8e03bdff --- /dev/null +++ b/.github/scripts/verify-image-changes.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +MAIN_IMAGES=(${MAIN_IMAGES?"Define MAIN_IMAGES env"}) +PR_NOT_MAIN_IMAGES=(${PR_NOT_MAIN_IMAGES?"Define PR_NOT_MAIN_IMAGES env"}) + +FAIL=false +for main_image in "${MAIN_IMAGES[@]}"; do + echo "${main_image} checking..." + + for pr_image in "${PR_NOT_MAIN_IMAGES[@]}"; do + if [ "${main_image}" == "${pr_image}" ]; then + echo " warning: ${pr_image} tag/version seems to be modified (should be main)!" 
+ FAIL=true + fi + done +done + +if $FAIL; then + exit 1 +fi diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 00000000..b70d9bad --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,19 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 60 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 7 +# Label to use when marking an issue as stale +staleLabel: lifecycle/stale +# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable +exemptLabels: + - lifecycle/frozen + - lifecycle/active +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: | + This issue has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. + Thank you for your contributions. +# Comment to post when closing a stale issue. Set to `false` to disable +closeComment: | + This issue has been automatically closed due to the lack of recent activity. + /lifecycle rotten + diff --git a/.github/workflows/create-release.yaml b/.github/workflows/create-release.yaml new file mode 100644 index 00000000..b9650864 --- /dev/null +++ b/.github/workflows/create-release.yaml @@ -0,0 +1,118 @@ +name: "Create release" + +on: + workflow_dispatch: + inputs: + name: + description: 'Release name ( e.g. "2.1.3" )' + default: "" + required: true + latest_release: + description: 'Latest release' + type: boolean + default: false + +jobs: + verify-head-status: + name: Verify HEAD + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Verify prow post jobs + run: ./.github/scripts/verify-docker-registry-jobs-status.sh ${{ github.ref_name }} + + - name: Verify github actions + run: ./.github/scripts/verify-actions-status.sh ${{ github.ref_name }} + + upgrade-images: + name: Upgrade main images + needs: verify-head-status + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + token: ${{ secrets.BOT_TOKEN }} + fetch-depth: 0 + + - name: Bump sec-scanners-config.yaml based on values.yaml + run: ./.github/scripts/upgrade-sec-scanners-config.sh + env: + IMG_VERSION: ${{ github.event.inputs.name }} + + - name: Commit&Push + run: | + git config --local user.email "team-otters@sap.com" + git config --local user.name "ottersbot" + + git add . 
+ git commit --allow-empty -m "upgrade dependencies" + git push origin ${{ github.ref_name }} + + create-draft: + name: Create draft release + needs: upgrade-images + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} # check out the latest branch changes (by default, this action checks out the SHA that triggered the workflow) + + - name: Create changelog + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PULL_BASE_REF: ${{ github.event.inputs.name }} + run: ./.github/scripts/create_changelog.sh ${{ github.event.inputs.name }} + + - name: Create draft release + id: create-draft + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + RELEASE_ID=$(./.github/scripts/create_draft_release.sh ${{ github.event.inputs.name }}) + echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT + + - name: Create lightweight tag + run: | + git tag ${{ github.event.inputs.name }} + git push origin ${{ github.event.inputs.name }} + + - name: Create release assets + id: create-assets + env: + IMG: "europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.event.inputs.name }}" + PULL_BASE_REF: ${{ github.event.inputs.name }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: ./.github/scripts/release.sh + + - name: Verify prow release jobs + run: ./.github/scripts/verify-docker-registry-jobs-status.sh ${{ github.ref_name }} + + outputs: + release_id: ${{ steps.create-draft.outputs.release_id }} + + publish-release: + name: Publish release + needs: create-draft + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.name }} # check out the release tag (by default, this action checks out the SHA that triggered the workflow) + + - name: Publish release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: ./.github/scripts/publish_release.sh ${{ needs.create-draft.outputs.release_id }} ${{ github.event.inputs.latest_release }} diff --git a/.github/workflows/images-verify.yaml b/.github/workflows/images-verify.yaml new file mode 100644 index 00000000..5bd1a746 --- /dev/null +++ b/.github/workflows/images-verify.yaml @@ -0,0 +1,44 @@ +name: Images verify +run-name: ${{github.event.pull_request.title}} +on: + pull_request: + branches: + - main + paths: + - sec-scanners-config.yaml + - config/docker-registry/values.yaml + +jobs: + # check that the developer didn't change `main` images in the values.yaml and sec-scanners-config.yaml files + check-main-tags: + runs-on: ubuntu-latest + steps: + - name: Checkout to main + uses: actions/checkout@v4 + with: + ref: main + + - name: Export main images from the main ref + run: | + # export sec-scanners-config.yaml images with the main tag as github env + echo SSC_MAIN_IMAGES=$(yq '.protecode[] | select(contains(":main")) | sub(":.*", "")' sec-scanners-config.yaml) >> $GITHUB_ENV + + # export values.yaml
images with the main tag as github env + echo VALUES_MAIN_IMAGES=$(yq '.global.images[] | select(.version == "main") | .name' config/docker-registry/values.yaml) >> $GITHUB_ENV + + - name: Checkout to context + uses: actions/checkout@v4 + + - name: Verify sec-scanners-config.yaml images + run: | + PR_NOT_MAIN_IMAGES=$(yq '.protecode[] | select(contains(":main") | not ) | sub(":.*", "")' sec-scanners-config.yaml) \ + .github/scripts/verify-image-changes.sh + env: + MAIN_IMAGES: ${{ env.SSC_MAIN_IMAGES }} + + - name: Verify values.yaml images + run: | + PR_NOT_MAIN_IMAGES=$(yq '.global.images[] | select(.version != "main") | .name' config/docker-registry/values.yaml) \ + .github/scripts/verify-image-changes.sh + env: + MAIN_IMAGES: ${{ env.VALUES_MAIN_IMAGES }} diff --git a/.github/workflows/lint-markdown-links.yml b/.github/workflows/lint-markdown-links.yml deleted file mode 100644 index d1790224..00000000 --- a/.github/workflows/lint-markdown-links.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Lint Markdown Links -run-name: ${{github.event.pull_request.title}} -on: [ pull_request ] -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: gaurav-nelson/github-action-markdown-link-check@v1 - with: - use-verbose-mode: 'no' - config-file: '.mlc.config.json' - folder-path: '.' - max-depth: -1 diff --git a/.github/workflows/markdown.yaml b/.github/workflows/markdown.yaml new file mode 100644 index 00000000..a871742d --- /dev/null +++ b/.github/workflows/markdown.yaml @@ -0,0 +1,18 @@ +name: Markdown +run-name: ${{github.event.pull_request.title}} +on: + schedule: + - cron: "0 5 * * *" # Run everyday at 5:00 AM + pull_request: +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 +# - uses: gaurav-nelson/github-action-markdown-link-check@v1 //TODO: after adjusting *.md bring the test back +# with: +# use-quiet-mode: 'yes' +# use-verbose-mode: 'yes' +# config-file: '.mlc.config.json' +# folder-path: '.' 
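The yq expressions that `images-verify.yaml` uses to split `main` images from retagged ones can be tried against a minimal, assumed `sec-scanners-config.yaml` shape:

```bash
# Sketch of the selections images-verify.yaml performs (file content assumed).
cat <<'EOF' > /tmp/ssc.yaml
protecode:
  - europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:main
  - europe-docker.pkg.dev/kyma-project/prod/registry:2.8.3
EOF

# what the job exports as SSC_MAIN_IMAGES: names still pinned to :main
yq '.protecode[] | select(contains(":main")) | sub(":.*", "")' /tmp/ssc.yaml

# what it passes as PR_NOT_MAIN_IMAGES: names whose tag diverged from main
yq '.protecode[] | select(contains(":main") | not) | sub(":.*", "")' /tmp/ssc.yaml
```

`verify-image-changes.sh` then fails the check whenever a name from the first list (taken from the `main` ref) shows up in the second list (taken from the PR).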
+# max-depth: -1 diff --git a/.github/workflows/operator-verify.yaml b/.github/workflows/operator-verify.yaml new file mode 100644 index 00000000..f2a14b10 --- /dev/null +++ b/.github/workflows/operator-verify.yaml @@ -0,0 +1,82 @@ +name: Operator verify + +on: + push: + branches: [ "main", "release-*" ] + pull_request: + paths-ignore: + - 'docs/**' + - 'examples/**' + types: + - opened + - reopened + - synchronize + - ready_for_review + - converted_to_draft + +jobs: + lint: + if: github.event_name == 'pull_request' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: golangci/golangci-lint-action@v3 + with: + version: latest + working-directory: 'components/operator' + + unit-test: + if: github.event_name == 'pull_request' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: run test + run: make -C components/operator test + + +# upgrade-test: //TODO: change it to run dockerregistry verify (need to be implemented) +# runs-on: ubuntu-latest +# if: github.event_name == 'push' +# steps: +# - uses: actions/checkout@v4 +# - name: create single cluster +# uses: AbsaOSS/k3d-action@4e8b3239042be1dc0aed6c5eb80c13b18200fc79 #v2.4.0 +# with: +# cluster-name: "k3dCluster" +# args: >- +# --agents 1 +# --image rancher/k3s:v1.28.6-k3s1 +# --port 80:80@loadbalancer +# --port 443:443@loadbalancer +# --wait +# - name: upgrade test +# run: make -C hack upgrade-test +# env: +# IMG: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.sha }} + +# gardener-integration-test: //TODO: change it to run dockerregistry verify (need to be implemented) +# if: github.event_name == 'push' +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v4 +# - name: save sa +# shell: bash +# run: 'echo "$GARDENER_SA" > /tmp/gardener-sa.yaml' +# env: +# GARDENER_SA: ${{ secrets.GARDENER_SA }} +# - name: provision gardener +# run: make -C hack provision-gardener +# env: +# GARDENER_SECRET_NAME: ${{ secrets.GARDENER_SECRET_NAME }} +# GARDENER_PROJECT: ${{ secrets.GARDENER_PROJECT }} +# GARDENER_SA_PATH: /tmp/gardener-sa.yaml +# - name: run test +# run: make -C hack integration-test-on-cluster +# env: +# IMG: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:${{ github.sha }} +# - name: deprovision gardener +## https://docs.github.com/en/actions/learn-github-actions/expressions#always +# if: ${{ always() }} +# run: make -C hack deprovision-gardener +# env: +# GARDENER_SA_PATH: /tmp/gardener-sa.yaml diff --git a/.github/workflows/pull-gitleaks.yml b/.github/workflows/pull-gitleaks.yml deleted file mode 100644 index ee92cf44..00000000 --- a/.github/workflows/pull-gitleaks.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: pull-gitleaks -on: - pull_request: - types: [opened, edited, synchronize, reopened, ready_for_review] - -env: - GITLEAKS_VERSION: 8.18.2 - -jobs: - scan: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Fetch gitleaks ${{ env.GITLEAKS_VERSION }} - run: curl -Lso gitleaks.tar.gz https://github.com/gitleaks/gitleaks/releases/download/v${{ env.GITLEAKS_VERSION }}/gitleaks_${{ env.GITLEAKS_VERSION }}_linux_x64.tar.gz && tar -xvzf ./gitleaks.tar.gz - - name: Run gitleaks - # Scan commits between base and head of the pull request - run: ./gitleaks detect --log-opts=${PULL_BASE_SHA}...${PULL_HEAD_SHA} --verbose --redact - env: - PULL_BASE_SHA: ${{ github.event.pull_request.base.sha }} - PULL_HEAD_SHA: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/stale.yml 
b/.github/workflows/stale.yml deleted file mode 100644 index 1d415fed..00000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: 'Manage Stale Issues and Pull Requests' - -on: - schedule: - - cron: '0 0 * * *' # Runs daily at midnight - workflow_dispatch: # Allows manual triggering of the workflow - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - days-before-stale: 60 - days-before-close: 7 - stale-issue-label: 'lifecycle/stale' - stale-pr-label: 'lifecycle/stale' - exempt-issue-labels: 'lifecycle/frozen,lifecycle/active' - exempt-pr-labels: 'lifecycle/frozen,lifecycle/active' - stale-issue-message: | - This issue has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. - Thank you for your contributions. - stale-pr-message: | - This pull request has been automatically marked as stale due to the lack of recent activity. It will soon be closed if no further activity occurs. - Thank you for your contributions. - close-issue-message: | - This issue has been automatically closed due to the lack of recent activity. - /lifecycle rotten - close-pr-message: | - This pull request has been automatically closed due to the lack of recent activity. - /lifecycle rotten diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..7d4dd69e --- /dev/null +++ b/.gitignore @@ -0,0 +1,44 @@ +# IDEs +.vscode +.idea +*.swp +*.swo +*~ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* +Dockerfile.cross + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +module-chart +module-chart-test +mod +default.yaml +moduletemplate.yaml +moduletemplate-k3d.yaml +docs/.DS_Store +.DS_Store +__debug_bin +vendor + +moduletemplate-latest.yaml +module-config.yaml +dockerregistry-operator.yaml +dockerregistry.yaml + +examples/python-text2img/resources/secrets/deepai.env \ No newline at end of file diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 00000000..51f4cf9c --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,26 @@ +# This is a configuration file for the markdownlint. You can use this file to overwrite the default settings. +# MD013 is set to false by default because many files include lines longer than the conventional 80 character limit +MD013: false +# Disable the Multiple headings with the same content rule +MD024: false +# MD029 is set to false because it generated some issues with longer lists +MD029: false +# MD044 is used to set capitalization for particular words. 
You can determine whether it should be used also for code blocks and HTML elements +MD044: + code_blocks: false + html_elements: false + names: + - Kyma + - Kubernetes + - ConfigMap + - CronJob + - CustomResourceDefinition + - Ingress + - Node + - PodPreset + - Pod + - ProwJob + - Secret + - ServiceBinding + - ServiceClass + - ServiceInstance \ No newline at end of file diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 00000000..578db9cb --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1 @@ +_sidebar.md \ No newline at end of file diff --git a/.mlc.config.json b/.mlc.config.json index 9e9e139e..70d4d516 100644 --- a/.mlc.config.json +++ b/.mlc.config.json @@ -1,14 +1,12 @@ { - "_comment": "This is a configuration file for the [Markdown link check](https://github.com/tcort/markdown-link-check).", - "_comment": "All `/kyma-project` repositories in GitHub use [Markdown link check](https://github.com/tcort/markdown-link-check) to check their Markdown files for broken links.", - "_comment": "Configuration and maintenance of the Markdown link check tool is the responsibility of a repository owner.", - "_comment": "See the following configuration example.", - "_comment": "For more details read the [repository guidelines](https://github.com/kyma-project/community/blob/main/docs/guidelines/repository-guidelines/01-new-repository-settings.md).", - "replacementPatterns": [ - { - "_comment": "a replacement rule for all the in-repository references", - "pattern": "^/", - "replacement": "{{BASEURL}}/" - } - ] -} + "replacementPatterns": [ + { + "_comment": "a replacement rule for all the in-repository references", + "pattern": "^/", + "replacement": "{{BASEURL}}/" + } + ], + "timeout": "20s", + "retryCount": 5, + "fallbackRetryDelay": "30s" + } \ No newline at end of file diff --git a/.reuse/dep5 b/.reuse/dep5 old mode 100755 new mode 100644 index 96d499f7..62c36819 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -1,11 +1,11 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: -Upstream-Contact: -Source: -Disclaimer: The code in this project may include calls to APIs ("API Calls") of +Upstream-Name: docker-registry +Upstream-Contact: krzysztof.kwiatosz@sap.com +Source: https://github.com/kyma-project/docker-registry +Disclaimer: The code in this project may include calls to APIs (“API Calls”) of SAP or third-party products or services developed outside of this project - ("External Products"). - "APIs" means application programming interfaces, as well as their respective + (“External Products”). + “APIs” means application programming interfaces, as well as their respective specifications and implementing code that allows software to communicate with other software. API Calls to External Products are not licensed under the open source license @@ -16,7 +16,7 @@ Disclaimer: The code in this project may include calls to APIs ("API Calls") of alter, expand or supersede any terms of the applicable additional agreements. If you have a valid license agreement with SAP for the use of a particular SAP External Product, then you may make use of any API Calls included in this - project's code for that SAP External Product, subject to the terms of such + project’s code for that SAP External Product, subject to the terms of such license agreement. 
If you do not have a valid license agreement for the use of a particular SAP External Product, then you may only make use of any API Calls in this project for that SAP External Product for your internal, non-productive @@ -24,14 +24,6 @@ Disclaimer: The code in this project may include calls to APIs ("API Calls") of you any rights to use or access any SAP External Product, or provide any third parties the right to use of access any SAP External Product, through API Calls. -Files: -Copyright: SAP SE or an SAP affiliate company and contributors +Files: * +Copyright: 2023 SAP SE or an SAP affiliate company and Kyma contributors License: Apache-2.0 - -Files: -Copyright: -License: - -Files: -Copyright: -License: \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS index 35d99741..164aba18 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,38 +1,8 @@ -# Overview +# These are the default owners for the whole content of the `dockerregistry` repository. The default owners are automatically added as reviewers when you open a pull request unless different owners are specified in the file. +* @kyma-project/otters -# The CODEOWNERS file is a GitHub's feature which allows you to create an overview of the code ownership structure in your repository. -# Specify the default owners of your repository and code owners of particular repository parts to define who is automatically requested for a review each time a contributor creates a pull request to the main branch. -# Modify the default settings of the repository and select the "Require review from Code Owners" option on the protected main branch to require one approval from the owners of every part of the repository included in the pull request. For more details, read the following article on GitHub: https://help.github.com/articles/enabling-required-reviews-for-pull-requests/. +# All files and subdirectories in /docs +/docs/ @kyma-project/technical-writers -# Details - -# The CODEOWNERS file is located at the root of your repository and includes a specification of the code ownership structure of the repository. -# It is up to you to decide who is responsible for the review of particular parts and types of files in your repository. - -# When defining the file, keep in mind the following rules: - -# Lines starting with a hash (#) are comments. -# Each line of the file is a file pattern followed by one or more owners. -# You can use individual GitHub usernames, e-mail addresses, or team names to define owners. To define the owners with a team name, first add the team to your repository as collaborators with write access permissions. For more details, read the following article on GitHub: https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/. -# Define the default owners of the repository. They are automatically requested for a review of any content at the root of the repository and any content for which no owners are specified in this file. -# Provide granular ownership specification for folders and subfolders. You can also define the owners of specific file types in your repository. -# The order is important. The last matching pattern in the file has the most precedence. - -# Examples - -# These are the default owners for the whole content of the repository, including the content for which no owners are specified in the file. 
-# * @global-owner1 globalowner@example.com @org/team-name -# The following rule indicates that if a pull request affects folder1 at the root of the repository and any content in that folder, only this owner is requested for a review. -# /folder1/ @testuser1 -# When you use the following pattern, you specify that @testuser2 is responsible for the review of any file in folder2, excluding subfolders located therein. -# /folder2/* @testuser2 -# In this example, you define @testuser3 as the owner of any content in every "docs" folder in the repository. -# docs/ @testuser3 -# When you open a pull request that modifies the "yaml" files, only @testuser4 is requested for a review, and the global owner(s) are not. -# *.yaml @testuser4 - -# Reference - -# For more details, read the following articles on GitHub: -# https://help.github.com/articles/about-codeowners/ -# https://github.com/blog/2392-introducing-code-owners/ +# All .md files +*.md @kyma-project/technical-writers diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ed7a6b01..b86285c7 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -# Code of Conduct +# Code of conduct -Each contributor and maintainer of this project agrees to follow the community [Code of Conduct](https://github.com/kyma-project/community/blob/main/docs/contributing/01-code-of-conduct.md) that relies on the CNCF Code of Conduct. Read it to learn about the agreed standards of behavior, shared values that govern our community, and details on how to report any suspected Code of Conduct violations. +Each contributor and maintainer of this project agrees to follow the [community Code of Conduct](https://github.com/kyma-project/community/blob/main/docs/contributing/01-code-of-conduct.md) that relies on the CNCF Code of Conduct. Read it to learn about the agreed standards of behavior, shared values that govern our community, and details on how to report any suspected Code of Conduct violations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1ac0012c..71bc0fc6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -# Contributing +## Overview -To contribute to this project, follow the general [Contributing Rules](https://github.com/kyma-project/community/blob/main/docs/contributing/02-contributing.md). +To contribute to this project, follow the general [contributing](https://github.com/kyma-project/community/blob/main/docs/contributing/02-contributing.md) guidelines. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a2ecafa1 --- /dev/null +++ b/Makefile @@ -0,0 +1,31 @@ +PROJECT_ROOT=. 
+OPERATOR_ROOT=./components/operator +include ${PROJECT_ROOT}/hack/help.mk +include ${PROJECT_ROOT}/hack/k3d.mk + +##@ Installation +.PHONY: install-dockerregistry-main +install-dockerregistry-main: ## Install dockerregistry with operator using default dockerregistry cr + make -C ${OPERATOR_ROOT} deploy-main apply-default-dockerregistry-cr check-dockerregistry-installation + +.PHONY: install-dockerregistry-custom-operator +install-dockerregistry-custom-operator: ## Install dockerregistry with operator from IMG env using default dockerregistry cr + $(call check-var,IMG) + make -C ${OPERATOR_ROOT} deploy apply-default-dockerregistry-cr check-dockerregistry-installation + +.PHONY: install-dockerregistry-latest-release +install-dockerregistry-latest-release: ## Install dockerregistry from latest release + kubectl create namespace kyma-system || true + kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/dockerregistry-operator.yaml + kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/default-dockerregistry-cr.yaml -n kyma-system + make -C ${OPERATOR_ROOT} check-dockerregistry-installation + +.PHONY: remove-dockerregistry +remove-dockerregistry: ## Remove dockerregistry-cr and dockerregistry operator + make -C ${OPERATOR_ROOT} remove-dockerregistry undeploy + +.PHONY: run +run: create-k3d install-dockerregistry-main ## Create k3d cluster and install dockerregistry from main + +check-var = $(if $(strip $($1)),,$(error "$1" is not defined)) + diff --git a/README.md b/README.md index 5714e17b..3933cd73 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,105 @@ -> **NOTE:** This is a general template that you can use for a project README.md. Except for the mandatory sections, use only those sections that suit your use case but keep the proposed section order. -> -> Mandatory sections: -> - `Overview` -> - `Prerequisites`, if there are any requirements regarding hard- or software -> - `Installation` -> - `Contributing` - do not change this! -> - `Code of Conduct` - do not change this! -> - `Licensing` - do not change this! - -# {Project Title} - -> Modify the title and insert the name of your project. Use Heading 1 (H1). +# Serverless + +## Status +![GitHub tag checks state](https://img.shields.io/github/checks-status/kyma-project/serverless-manager/main?label=serverless-operator&link=https%3A%2F%2Fgithub.com%2Fkyma-project%2Fserverless-manager%2Fcommits%2Fmain) + +[![REUSE status](https://api.reuse.software/badge/github.com/kyma-project/docker-registry)](https://api.reuse.software/info/github.com/kyma-project/docker-registry) + ## Overview - -> Provide a description of the project's functionality. -> -> If it is an example README.md, describe what the example illustrates. +Serverless Operator allows deploying the [Serverless](https://kyma-project.io/docs/kyma/latest/01-overview/serverless/) component in the Kyma cluster in compatibility with [Lifecycle Manager](https://github.com/kyma-project/lifecycle-manager). -## Prerequisites +## Install -> List the requirements to run the project or example. +Create the `kyma-system` namespace: -## Installation +```bash +kubectl create namespace kyma-system +``` -> Explain the steps to install your project. If there are multiple installation options, mention the recommended one and include others in a separate document. Create an ordered list for each installation task. -> -> If it is an example README.md, describe how to build, run locally, and deploy the example. 
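The root Makefile's `check-var` guard above mirrors the `${VAR?message}` expansions the `.github` scripts use for required inputs. A small sketch of the shell side of that convention (values assumed):

```bash
#!/usr/bin/env bash
# Sketch of the required-variable guard used across the release scripts,
# e.g. IMG=${IMG?"Define IMG env"} in release.sh.
unset IMG
( : "${IMG?Define IMG env}" ) 2>/dev/null || echo "guard fired: IMG is required"

IMG="europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:main"
: "${IMG?Define IMG env}" && echo "IMG accepted: ${IMG}"
```

The subshell in the first probe matters: a failed `${VAR?}` expansion aborts the current shell, so probing inside `( ... )` lets the script continue.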
Format the example as code blocks and specify the language, highlighting where possible. Explain how you can validate that the example ran successfully. For example, define the expected output or commands to run which check a successful deployment. -> -> Add subsections (H3) for better readability. +Apply the following script to install Serverless Operator: -## Usage +```bash +kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/dockerregistry-operator.yaml -> Explain how to use the project. You can create multiple subsections (H3). Include the instructions or provide links to the related documentation. +``` + +To get Serverless installed, apply the sample Serverless CR: + +```bash +kubectl apply -f https://github.com/kyma-project/docker-registry/releases/latest/download/default-dockerregistry-cr.yaml +``` ## Development -> Add instructions on how to develop the project or example. It must be clear what to do and, for example, how to trigger the tests so that other contributors know how to make their pull requests acceptable. Include the instructions or provide links to related documentation. +### Prerequisites + +- Access to a Kubernetes (v1.24 or higher) cluster +- [Go](https://go.dev/) +- [k3d](https://k3d.io/) +- [Docker](https://www.docker.com/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Kubebuilder](https://book.kubebuilder.io/) + + +## Installation in the k3d Cluster Using Make Targets + +1. Clone the project. + + ```bash + git clone https://github.com/kyma-project/docker-registry.git && cd docker-registry/ + ``` + +2. Build Serverless Operator locally and run it in the k3d cluster. + + ```bash + make run + ``` + +> **NOTE:** To clean up the k3d cluster, use the `make delete-k3d` make target. + + +## Using Serverless Operator + +- Create a Serverless instance. + + ```bash + kubectl apply -f config/samples/default-dockerregistry-cr.yaml + ``` + +- Delete a Serverless instance. -## Contributing - + ```bash + kubectl delete -f config/samples/default-dockerregistry-cr.yaml + ``` -See the [Contributing Rules](CONTRIBUTING.md). +- Use an external registry. -## Code of Conduct - + The following example shows how you can modify the Serverless Docker registry address using the `dockerregistries.operator.kyma-project.io` CR: -See the [Code of Conduct](CODE_OF_CONDUCT.md) document. + ```bash + kubectl create secret generic my-secret \ + --namespace kyma-system \ + --from-literal username="" \ + --from-literal password="" \ + --from-literal serverAddress="" \ + --from-literal registryAddress="" + ``` -## Licensing - + > **NOTE:** For DockerHub: + + > - SERVER_ADDRESS is "https://index.docker.io/v1/", + > - USERNAME and REGISTRY_ADDRESS must be identical. -See the [license](./LICENSE) file. + ```bash + cat < $(PROJECT_ROOT)/dockerregistry-operator.yaml + + +.PHONY: apply-default-dockerregistry-cr +apply-default-dockerregistry-cr: ## Apply the k3d dockerregistry CR. + kubectl apply \ + -f ${PROJECT_ROOT}/config/samples/default-dockerregistry-cr.yaml + +.PHONY: remove-dockerregistry +remove-dockerregistry: ## Remove Dockerregistry CR + kubectl delete dockerregistry -n kyma-system default --timeout 2m || (kubectl get dockerregistry -n kyma-system -oyaml && false) + + +.PHONY: check-dockerregistry-installation +check-dockerregistry-installation: ## Wait for Dockerregistry CR to be in Ready state.
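Before the recipe that follows, note what this check amounts to in plain kubectl terms. A sketch — the CR name `default` and the `kyma-system` namespace come from the sample CR and targets above:

```bash
# Sketch of the installation check: the operator Deployment must become
# Available and the DockerRegistry CR must reach the Ready state.
kubectl wait --for=condition=Available -n kyma-system deployment/dockerregistry-operator --timeout=60s
kubectl wait --for=jsonpath='{.status.state}'=Ready -n kyma-system dockerregistry/default --timeout=2m
```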
+ # wait some time to make sure operator starts the reconciliation first + sleep 10 + + ./hack/verify_dockerregistry_status.sh || \ + (make print-dockerregistry-details && false) + + kubectl wait --for condition=Available -n kyma-system deployment dockerregistry-operator --timeout=60s || \ + (make print-dockerregistry-details && false) + +.PHONY: print-dockerregistry-details +print-dockerregistry-details: ## Print all pods, deploys and dockerregistry CRs in the kyma-system namespace. + kubectl get dockerregistry -n kyma-system -oyaml + kubectl get deploy -n kyma-system -oyaml + kubectl get pods -n kyma-system -oyaml + + +##@ Module +.PHONY: module-image +module-image: docker-build docker-push ## Build the Module Image and push it to a registry defined in IMG. + echo "built and pushed module image $(IMG)" diff --git a/components/operator/api/v1alpha1/dockerregistry_types.go b/components/operator/api/v1alpha1/dockerregistry_types.go new file mode 100644 index 00000000..4d3b574a --- /dev/null +++ b/components/operator/api/v1alpha1/dockerregistry_types.go @@ -0,0 +1,162 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Endpoint struct { + Endpoint string `json:"endpoint"` +} + +// DockerRegistrySpec defines the desired state of DockerRegistry +type DockerRegistrySpec struct { + // Sets the timeout for the Function health check. 
The default value in seconds is `10` + HealthzLivenessTimeout string `json:"healthzLivenessTimeout,omitempty"` +} + +type State string + +type Served string + +type ConditionReason string + +type ConditionType string + +const ( + StateReady State = "Ready" + StateProcessing State = "Processing" + StateWarning State = "Warning" + StateError State = "Error" + StateDeleting State = "Deleting" + + ServedTrue Served = "True" + ServedFalse Served = "False" + + // installation and deletion details + ConditionTypeInstalled = ConditionType("Installed") + + // prerequisites and soft dependencies + ConditionTypeConfigured = ConditionType("Configured") + + // deletion + ConditionTypeDeleted = ConditionType("Deleted") + + ConditionReasonConfiguration = ConditionReason("Configuration") + ConditionReasonConfigurationErr = ConditionReason("ConfigurationErr") + ConditionReasonConfigured = ConditionReason("Configured") + ConditionReasonInstallation = ConditionReason("Installation") + ConditionReasonInstallationErr = ConditionReason("InstallationErr") + ConditionReasonInstalled = ConditionReason("Installed") + ConditionReasonDuplicated = ConditionReason("Duplicated") + ConditionReasonDeletion = ConditionReason("Deletion") + ConditionReasonDeletionErr = ConditionReason("DeletionErr") + ConditionReasonDeleted = ConditionReason("Deleted") + + Finalizer = "dockerregistry-operator.kyma-project.io/deletion-hook" +) + +type DockerRegistryStatus struct { + SecretName string `json:"secretName,omitempty"` + + HealthzLivenessTimeout string `json:"healthzLivenessTimeout,omitempty"` + + // State signifies current state of DockerRegistry. + // Value can be one of ("Ready", "Processing", "Error", "Deleting"). + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=Processing;Deleting;Ready;Error;Warning + State State `json:"state,omitempty"` + + // Served signifies that current DockerRegistry is managed. + // Value can be one of ("True", "False"). + // +kubebuilder:validation:Enum=True;False + Served Served `json:"served"` + + // Conditions associated with CustomStatus. 
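Since this status struct drives both `.status` and the printer columns declared on the CRD, its fields can be inspected from the cluster side. A hypothetical spot-check (CR name `default` assumed):

```bash
# The Configured/Installed printer columns come from the conditions list,
# and the state column from .status.state (see the kubebuilder markers below).
kubectl get dockerregistries.operator.kyma-project.io -n kyma-system

# Query one condition directly, with the same JSONPath the columns use:
kubectl get dockerregistry default -n kyma-system \
  -o jsonpath='{.status.conditions[?(@.type=="Installed")].status}'
```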
+ Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen=true + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="Configured",type="string",JSONPath=".status.conditions[?(@.type=='Configured')].status" +//+kubebuilder:printcolumn:name="Installed",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" +//+kubebuilder:printcolumn:name="generation",type="integer",JSONPath=".metadata.generation" +//+kubebuilder:printcolumn:name="age",type="date",JSONPath=".metadata.creationTimestamp" +//+kubebuilder:printcolumn:name="state",type="string",JSONPath=".status.state" + +// DockerRegistry is the Schema for the dockerregistry API +type DockerRegistry struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DockerRegistrySpec `json:"spec,omitempty"` + Status DockerRegistryStatus `json:"status,omitempty"` +} + +func (s *DockerRegistry) UpdateConditionFalse(c ConditionType, r ConditionReason, err error) { + condition := metav1.Condition{ + Type: string(c), + Status: "False", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: err.Error(), + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) UpdateConditionUnknown(c ConditionType, r ConditionReason, msg string) { + condition := metav1.Condition{ + Type: string(c), + Status: "Unknown", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: msg, + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) UpdateConditionTrue(c ConditionType, r ConditionReason, msg string) { + condition := metav1.Condition{ + Type: string(c), + Status: "True", + LastTransitionTime: metav1.Now(), + Reason: string(r), + Message: msg, + } + meta.SetStatusCondition(&s.Status.Conditions, condition) +} + +func (s *DockerRegistry) IsServedEmpty() bool { + return s.Status.Served == "" +} + +//+kubebuilder:object:root=true + +// DockerRegistryList contains a list of DockerRegistry +type DockerRegistryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DockerRegistry `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DockerRegistry{}, &DockerRegistryList{}) +} diff --git a/components/operator/api/v1alpha1/groupversion_info.go b/components/operator/api/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..9a7c8cb2 --- /dev/null +++ b/components/operator/api/v1alpha1/groupversion_info.go @@ -0,0 +1,41 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the operator v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=operator.kyma-project.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + DockerregistryGroup = "operator.kyma-project.io" + DockerregistryVersion = "v1alpha1" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: DockerregistryGroup, Version: DockerregistryVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/components/operator/api/v1alpha1/helpers.go b/components/operator/api/v1alpha1/helpers.go new file mode 100644 index 00000000..eea1d0e1 --- /dev/null +++ b/components/operator/api/v1alpha1/helpers.go @@ -0,0 +1,26 @@ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (s *DockerRegistry) IsInState(state State) bool { + return s.Status.State == state +} + +func (s *DockerRegistry) IsCondition(conditionType ConditionType) bool { + return meta.FindStatusCondition( + s.Status.Conditions, string(conditionType), + ) != nil +} + +func (s *DockerRegistry) IsConditionTrue(conditionType ConditionType) bool { + condition := meta.FindStatusCondition(s.Status.Conditions, string(conditionType)) + return condition != nil && condition.Status == metav1.ConditionTrue +} + +const ( + DefaultEnableInternal = false + EndpointDisabled = "" +) diff --git a/components/operator/api/v1alpha1/zz_generated.deepcopy.go b/components/operator/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..74d48ab7 --- /dev/null +++ b/components/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistry) DeepCopyInto(out *DockerRegistry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistry. +func (in *DockerRegistry) DeepCopy() *DockerRegistry { + if in == nil { + return nil + } + out := new(DockerRegistry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DockerRegistry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistryList) DeepCopyInto(out *DockerRegistryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DockerRegistry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistryList. +func (in *DockerRegistryList) DeepCopy() *DockerRegistryList { + if in == nil { + return nil + } + out := new(DockerRegistryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerRegistryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistrySpec) DeepCopyInto(out *DockerRegistrySpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistrySpec. +func (in *DockerRegistrySpec) DeepCopy() *DockerRegistrySpec { + if in == nil { + return nil + } + out := new(DockerRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerRegistryStatus) DeepCopyInto(out *DockerRegistryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerRegistryStatus. +func (in *DockerRegistryStatus) DeepCopy() *DockerRegistryStatus { + if in == nil { + return nil + } + out := new(DockerRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} diff --git a/components/operator/controllers/controller.go b/components/operator/controllers/controller.go new file mode 100644 index 00000000..658ee537 --- /dev/null +++ b/components/operator/controllers/controller.go @@ -0,0 +1,80 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/predicate" + "github.com/kyma-project/docker-registry/components/operator/internal/state" + "github.com/kyma-project/docker-registry/components/operator/internal/tracing" + "github.com/pkg/errors" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// dockerRegistryReconciler reconciles a DockerRegistry object +type dockerRegistryReconciler struct { + initStateMachine func(*zap.SugaredLogger) state.StateReconciler + client client.Client + log *zap.SugaredLogger +} + +func NewDockerRegistryReconciler(client client.Client, config *rest.Config, recorder record.EventRecorder, log *zap.SugaredLogger, chartPath string) *dockerRegistryReconciler { + cache := chart.NewSecretManifestCache(client) + + return &dockerRegistryReconciler{ + initStateMachine: func(log *zap.SugaredLogger) state.StateReconciler { + return state.NewMachine(client, config, recorder, log, cache, chartPath) + }, + client: client, + log: log, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (sr *dockerRegistryReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.DockerRegistry{}, builder.WithPredicates(predicate.NoStatusChangePredicate{})). + Watches(&corev1.Service{}, tracing.ServiceCollectorWatcher()). + Complete(sr) +} + +func (sr *dockerRegistryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := sr.log.With("request", req) + log.Info("reconciliation started") + + instance, err := state.GetDockerRegistryOrServed(ctx, req, sr.client) + if err != nil { + log.Warnf("while getting dockerregistry, got error: %s", err.Error()) + return ctrl.Result{}, errors.Wrap(err, "while fetching dockerregistry instance") + } + if instance == nil { + log.Info("Couldn't find proper instance of dockerregistry") + return ctrl.Result{}, nil + } + + r := sr.initStateMachine(log) + return r.Reconcile(ctx, *instance) +} diff --git a/components/operator/controllers/controller_rbac.go b/components/operator/controllers/controller_rbac.go new file mode 100644 index 00000000..4d94ff8a --- /dev/null +++ b/components/operator/controllers/controller_rbac.go @@ -0,0 +1,35 @@ +package controllers + +// TODO: dockerregistry-manager doesn't need almost half of these RBAC rules. It uses them only to create other RBAC objects (is there any other option?
- investigate ) + +//+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;patch +//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services;secrets;serviceaccounts;configmaps,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=nodes,verbs=list;watch;get +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=apps,resources=replicasets,verbs=list +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get +//+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get + +//+kubebuilder:rbac:groups=policy,resources=podsecuritypolicies,verbs=use + +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings;roles,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries/status,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=operator.kyma-project.io,resources=dockerregistries/finalizers,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations;mutatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=scheduling.k8s.io,resources=priorityclasses,verbs=get;list;watch;create;update;patch;delete;deletecollection diff --git a/components/operator/controllers/controller_test.go b/components/operator/controllers/controller_test.go new file mode 100644 index 00000000..30c26993 --- /dev/null +++ b/components/operator/controllers/controller_test.go @@ -0,0 +1,99 @@ +package controllers + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/utils/ptr" +) + +var _ = Describe("DockerRegistry controller", func() { + Context("When creating fresh instance", func() { + const ( + namespaceName = "kyma-system" + crName = "cr-test" + deploymentName = "internal-docker-registry" + registrySecret = registry.SecretName + ) + + var ( + defaultData = dockerRegistryData{ + TraceCollectorURL: ptr.To[string](v1alpha1.EndpointDisabled), + EnableInternal: ptr.To[bool](v1alpha1.DefaultEnableInternal), + } + ) + + It("The status should be Success", func() { + h := testHelper{ + ctx: context.Background(), + namespaceName: namespaceName, + } + h.createNamespace() + + { + emptyData := v1alpha1.DockerRegistrySpec{} + shouldCreateDockerRegistry(h, crName, deploymentName, emptyData) + shouldPropagateSpecProperties(h, registrySecret, defaultData) + } + + shouldDeleteDockerRegistry(h, crName, deploymentName) + }) + }) +}) + +func shouldCreateDockerRegistry(h testHelper, name, deploymentName string, spec v1alpha1.DockerRegistrySpec) { + // act + h.createDockerRegistry(name, spec) + + // we have to update deployment status manually + h.updateDeploymentStatus(deploymentName) + + // assert + Eventually(h.getDockerRegistryStatusFunc(name)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 20). + Should(ConditionTrueMatcher()) +} + +func shouldPropagateSpecProperties(h testHelper, registrySecretName string, expected dockerRegistryData) { + Eventually(h.createCheckRegistrySecretFunc(registrySecretName, expected.registrySecretData)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) +} + +func shouldDeleteDockerRegistry(h testHelper, name, deploymentName string) { + // initial assert + var deployList appsv1.DeploymentList + Eventually(h.listKubernetesObjectFunc(&deployList)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + Expect(deployList.Items).To(HaveLen(1)) + + // act + var dockerRegistry v1alpha1.DockerRegistry + Eventually(h.getKubernetesObjectFunc(name, &dockerRegistry)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + Expect(k8sClient.Delete(h.ctx, &dockerRegistry)).To(Succeed()) + + Eventually(h.getKubernetesObjectFunc(name, &dockerRegistry)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) + + // assert + Eventually(h.getKubernetesObjectFunc(deploymentName, &appsv1.Deployment{})). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 10). + Should(BeTrue()) +} diff --git a/components/operator/controllers/suite_test.go b/components/operator/controllers/suite_test.go new file mode 100644 index 00000000..64dca95e --- /dev/null +++ b/components/operator/controllers/suite_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + uberzap "go.uber.org/zap" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + operatorv1alpha1 "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + config *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + + suiteCtx context.Context + cancelSuiteCtx context.CancelFunc +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "operator", "base", "crd", "bases"), + }, + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "..", "..", "bin", "k8s", "kubebuilder_assets"), + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + config, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(config).NotTo(BeNil()) + + err = operatorv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(config, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + k8sManager, err := ctrl.NewManager(config, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + config := uberzap.NewProductionConfig() + reconcilerLogger, err := config.Build() + Expect(err).NotTo(HaveOccurred()) + + chartPath := filepath.Join("..", "..", "..", "config", "docker-registry") + err = (NewDockerRegistryReconciler( + k8sManager.GetClient(), + k8sManager.GetConfig(), + record.NewFakeRecorder(100), + reconcilerLogger.Sugar(), + chartPath)). + SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + + suiteCtx, cancelSuiteCtx = context.WithCancel(context.Background()) + + err = k8sManager.Start(suiteCtx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancelSuiteCtx() + + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/components/operator/controllers/testhelper_test.go b/components/operator/controllers/testhelper_test.go new file mode 100644 index 00000000..bb5b299d --- /dev/null +++ b/components/operator/controllers/testhelper_test.go @@ -0,0 +1,285 @@ +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + gomegatypes "github.com/onsi/gomega/types" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type conditionMatcher struct { + expectedState v1alpha1.State + expectedConditionStatus metav1.ConditionStatus +} + +func ConditionTrueMatcher() gomegatypes.GomegaMatcher { + return &conditionMatcher{ + expectedState: v1alpha1.StateReady, + expectedConditionStatus: metav1.ConditionTrue, + } +} + +func (matcher *conditionMatcher) Match(actual interface{}) (success bool, err error) { + status, ok := actual.(v1alpha1.DockerRegistryStatus) + if !ok { + return false, fmt.Errorf("ConditionMatcher matcher expects an v1alpha1.DockerRegistryStatus") + } + + if status.State != matcher.expectedState { + return false, nil + } + + for _, condition := range status.Conditions { + if condition.Status != matcher.expectedConditionStatus { + return false, nil + } + } + + return true, nil +} + +func (matcher *conditionMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n\t%#v\nto be in %s state with all %s conditions", + actual, matcher.expectedState, matcher.expectedConditionStatus) +} + +func (matcher *conditionMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n\t%#v\nto be in %s state with all %s conditions", + actual, matcher.expectedState, matcher.expectedConditionStatus) +} + +type testHelper struct { + ctx context.Context + namespaceName string +} + +func (h *testHelper) updateDeploymentStatus(deploymentName string) { + By(fmt.Sprintf("Updating deployment status: %s", deploymentName)) + var deployment appsv1.Deployment + Eventually(h.getKubernetesObjectFunc(deploymentName, &deployment)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 30). + Should(BeTrue()) + + deployment.Status.Conditions = append(deployment.Status.Conditions, appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + Reason: "test-reason", + Message: "test-message", + }) + deployment.Status.Replicas = 1 + Expect(k8sClient.Status().Update(h.ctx, &deployment)).To(Succeed()) + + replicaSetName := h.createReplicaSetForDeployment(deployment) + + var replicaSet appsv1.ReplicaSet + Eventually(h.getKubernetesObjectFunc(replicaSetName, &replicaSet)). + WithPolling(time.Second * 2). + WithTimeout(time.Second * 30). 
+ Should(BeTrue()) + + replicaSet.Status.ReadyReplicas = 1 + replicaSet.Status.Replicas = 1 + Expect(k8sClient.Status().Update(h.ctx, &replicaSet)).To(Succeed()) + + By(fmt.Sprintf("Deployment status updated: %s", deploymentName)) +} + +func (h *testHelper) createReplicaSetForDeployment(deployment appsv1.Deployment) string { + replicaSetName := fmt.Sprintf("%s-replica-set", deployment.Name) + By(fmt.Sprintf("Creating replica set (for deployment): %s", replicaSetName)) + var ( + trueValue = true + one = int32(1) + ) + replicaSet := appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: replicaSetName, + Namespace: h.namespaceName, + Labels: deployment.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: deployment.Name, + UID: deployment.GetUID(), + Controller: &trueValue, + }, + }, + }, + // dummy values + Spec: appsv1.ReplicaSetSpec{ + Replicas: &one, + Selector: deployment.Spec.Selector, + Template: deployment.Spec.Template, + }, + } + Expect(k8sClient.Create(h.ctx, &replicaSet)).To(Succeed()) + By(fmt.Sprintf("Replica set (for deployment) created: %s", replicaSetName)) + return replicaSetName +} + +func (h *testHelper) createDockerRegistry(crName string, spec v1alpha1.DockerRegistrySpec) { + By(fmt.Sprintf("Creating cr: %s", crName)) + dockerRegistry := v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: crName, + Namespace: h.namespaceName, + Labels: map[string]string{ + "operator.kyma-project.io/kyma-name": "test", + }, + }, + Spec: spec, + } + Expect(k8sClient.Create(h.ctx, &dockerRegistry)).To(Succeed()) + By(fmt.Sprintf("Crd created: %s", crName)) +} + +func (h *testHelper) createNamespace() { + By(fmt.Sprintf("Creating namespace: %s", h.namespaceName)) + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: h.namespaceName, + }, + } + Expect(k8sClient.Create(h.ctx, &namespace)).To(Succeed()) + By(fmt.Sprintf("Namespace created: %s", h.namespaceName)) +} + +func (h *testHelper) getKubernetesObjectFunc(objectName string, obj client.Object) func() (bool, error) { + return func() (bool, error) { + return h.getKubernetesObject(objectName, obj) + } +} + +func (h *testHelper) getKubernetesObject(objectName string, obj client.Object) (bool, error) { + key := types.NamespacedName{ + Name: objectName, + Namespace: h.namespaceName, + } + + err := k8sClient.Get(h.ctx, key, obj) + if err != nil { + return false, err + } + return true, err +} + +func (h *testHelper) listKubernetesObjectFunc(list client.ObjectList) func() (bool, error) { + return func() (bool, error) { + return h.listKubernetesObject(list) + } +} + +func (h *testHelper) listKubernetesObject(list client.ObjectList) (bool, error) { + opts := client.ListOptions{ + Namespace: h.namespaceName, + } + + err := k8sClient.List(h.ctx, list, &opts) + if err != nil { + return false, err + } + return true, err +} + +func (h *testHelper) getDockerRegistryStatusFunc(name string) func() (v1alpha1.DockerRegistryStatus, error) { + return func() (v1alpha1.DockerRegistryStatus, error) { + return h.getDockerRegistryStatus(name) + } +} + +func (h *testHelper) getDockerRegistryStatus(name string) (v1alpha1.DockerRegistryStatus, error) { + var dockerRegistry v1alpha1.DockerRegistry + key := types.NamespacedName{ + Name: name, + Namespace: h.namespaceName, + } + err := k8sClient.Get(h.ctx, key, &dockerRegistry) + if err != nil { + return v1alpha1.DockerRegistryStatus{}, err + } + return dockerRegistry.Status, nil +} + +type 
dockerRegistryData struct { + EventPublisherProxyURL *string + TraceCollectorURL *string + EnableInternal *bool + registrySecretData +} + +type registrySecretData struct { + Username *string + Password *string + ServerAddress *string + RegistryAddress *string +} + +func (d *registrySecretData) toMap() map[string]string { + result := map[string]string{} + if d.Username != nil { + result["username"] = *d.Username + } + if d.Password != nil { + result["password"] = *d.Password + } + if d.ServerAddress != nil { + result["serverAddress"] = *d.ServerAddress + } + if d.RegistryAddress != nil { + result["registryAddress"] = *d.RegistryAddress + } + return result +} + +func (h *testHelper) createCheckRegistrySecretFunc(registrySecret string, expected registrySecretData) func() (bool, error) { + return func() (bool, error) { + var configurationSecret corev1.Secret + + if ok, err := h.getKubernetesObject( + registrySecret, &configurationSecret); !ok || err != nil { + return ok, err + } + if ok, err := secretContainsSameValues( + expected.toMap(), configurationSecret); !ok || err != nil { + return ok, err + } + if ok, err := secretContainsRequired(configurationSecret); !ok || err != nil { + return ok, err + } + return true, nil + } +} + +func secretContainsRequired(configurationSecret corev1.Secret) (bool, error) { + for _, k := range []string{"username", "password", "pullRegAddr", "pushRegAddr", ".dockerconfigjson"} { + _, ok := configurationSecret.Data[k] + if !ok { + return false, fmt.Errorf("values not propagated (%s is required)", k) + } + } + // all required keys are present + return true, nil +} + +func secretContainsSameValues(expected map[string]string, configurationSecret corev1.Secret) (bool, error) { + for k, expectedV := range expected { + v, okV := configurationSecret.Data[k] + if !okV { + return false, fmt.Errorf("values not propagated (%s: nil != %s )", k, expectedV) + } + if expectedV != string(v) { + return false, fmt.Errorf("values not propagated (%s: %s != %s )", k, string(v), expectedV) + } + } + // all expected keys are present and match + return true, nil +} diff --git a/components/operator/hack/boilerplate.go.txt b/components/operator/hack/boilerplate.go.txt new file mode 100755 index 00000000..29c55ecd --- /dev/null +++ b/components/operator/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ \ No newline at end of file diff --git a/components/operator/hack/verify_dockerregistry_status.sh b/components/operator/hack/verify_dockerregistry_status.sh new file mode 100755 index 00000000..5726552c --- /dev/null +++ b/components/operator/hack/verify_dockerregistry_status.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +function get_dockerregistry_status () { + local number=1 + while [[ $number -le 100 ]] ; do + echo ">--> checking dockerregistry status #$number" + local STATUS=$(kubectl get dockerregistry -n kyma-system default -o jsonpath='{.status.state}') + echo "dockerregistry status: ${STATUS:='UNKNOWN'}" + [[ "$STATUS" == "Ready" ]] && return 0 + sleep 5 + ((number = number + 1)) + done + + kubectl get all --all-namespaces + exit 1 +} + +get_dockerregistry_status diff --git a/components/operator/internal/annotation/disclaimer.go b/components/operator/internal/annotation/disclaimer.go new file mode 100644 index 00000000..1ed584fc --- /dev/null +++ b/components/operator/internal/annotation/disclaimer.go @@ -0,0 +1,22 @@ +package annotation + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +const ( + annotation = "dockerregistry-manager.kyma-project.io/managed-by-dockerregistry-manager-disclaimer" + message = "DO NOT EDIT - This resource is managed by DockerRegistry-Manager.\nAny modifications are discarded and the resource is reverted to the original state." +) + +func AddDoNotEditDisclaimer(obj unstructured.Unstructured) unstructured.Unstructured { + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[annotation] = message + obj.SetAnnotations(annotations) + + return obj +} diff --git a/components/operator/internal/annotation/disclaimer_test.go b/components/operator/internal/annotation/disclaimer_test.go new file mode 100644 index 00000000..26f013f1 --- /dev/null +++ b/components/operator/internal/annotation/disclaimer_test.go @@ -0,0 +1,17 @@ +package annotation + +import ( + "testing" + + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestAddDoNotEditDisclaimer(t *testing.T) { + t.Run("add disclaimer", func(t *testing.T) { + obj := unstructured.Unstructured{} + obj = AddDoNotEditDisclaimer(obj) + + require.Equal(t, message, obj.GetAnnotations()[annotation]) + }) +} diff --git a/components/operator/internal/chart/cache.go b/components/operator/internal/chart/cache.go new file mode 100644 index 00000000..3582308f --- /dev/null +++ b/components/operator/internal/chart/cache.go @@ -0,0 +1,142 @@ +package chart + +import ( + "context" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + _ ManifestCache = (*inMemoryManifestCache)(nil) + _ ManifestCache = (*secretManifestCache)(nil) +) + +var ( + emptySpecManifest = DockerRegistrySpecManifest{} +) + +type ManifestCache interface { + Set(context.Context, client.ObjectKey, DockerRegistrySpecManifest) error + Get(context.Context, client.ObjectKey) (DockerRegistrySpecManifest, error) + Delete(context.Context, client.ObjectKey) error +} + +// inMemoryManifestCache provides an in-memory processor to store dockerregistry Spec and rendered chart manifest. By using sync.Map for caching, +// concurrent operations to the processor from diverse reconciliations are considered safe. 
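+// +// A minimal usage sketch (illustrative only; ctx, key, and manifest are placeholder names, not part of this package): +// +// cache := NewInMemoryManifestCache() +// _ = cache.Set(ctx, key, DockerRegistrySpecManifest{Manifest: manifest}) +// spec, _ := cache.Get(ctx, key) // returns emptySpecManifest when the key has no entry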
+// +// Inside the processor is stored chart manifest with used custom flags by client.ObjectKey key. +type inMemoryManifestCache struct { + processor sync.Map +} + +// NewInMemoryManifestCache returns a new instance of inMemoryManifestCache. +func NewInMemoryManifestCache() *inMemoryManifestCache { + return &inMemoryManifestCache{ + processor: sync.Map{}, + } +} + +// Get loads the DockerRegistrySpecManifest from inMemoryManifestCache for the passed client.ObjectKey. +func (r *inMemoryManifestCache) Get(_ context.Context, key client.ObjectKey) (DockerRegistrySpecManifest, error) { + value, ok := r.processor.Load(key) + if !ok { + return emptySpecManifest, nil + } + + return *value.(*DockerRegistrySpecManifest), nil +} + +// Set saves the passed flags and manifest into inMemoryManifestCache for the client.ObjectKey. +func (r *inMemoryManifestCache) Set(_ context.Context, key client.ObjectKey, spec DockerRegistrySpecManifest) error { + r.processor.Store(key, &spec) + + return nil +} + +// Delete deletes flags and manifest from inMemoryManifestCache for the passed client.ObjectKey. +func (r *inMemoryManifestCache) Delete(_ context.Context, key client.ObjectKey) error { + r.processor.Delete(key) + return nil +} + +// secretManifestCache - provides a Secret based processor to store dockerregistry Spec and rendered chart manifest. +// +// Inside the secret we store manifest and flags used to render it. +type secretManifestCache struct { + client client.Client +} + +type DockerRegistrySpecManifest struct { + ManagerUID string + CustomFlags map[string]interface{} + Manifest string +} + +// NewSecretManifestCache - returns a new instance of SecretManifestCache. +func NewSecretManifestCache(client client.Client) *secretManifestCache { + return &secretManifestCache{ + client: client, + } +} + +// Delete - removes Secret cache based on the passed client.ObjectKey. +func (m *secretManifestCache) Delete(ctx context.Context, key client.ObjectKey) error { + err := m.client.Delete(ctx, &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + }) + + return client.IgnoreNotFound(err) +} + +// Get - loads the DockerRegistrySpecManifest from SecretManifestCache based on the passed client.ObjectKey. +func (m *secretManifestCache) Get(ctx context.Context, key client.ObjectKey) (DockerRegistrySpecManifest, error) { + secret := corev1.Secret{} + err := m.client.Get(ctx, key, &secret) + if errors.IsNotFound(err) { + return emptySpecManifest, nil + } + if err != nil { + return emptySpecManifest, err + } + + spec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &spec) + if err != nil { + return emptySpecManifest, err + } + + return spec, nil +} + +// Set - saves the passed flags and manifest into Secret based on the client.ObjectKey. 
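+// Update is attempted first; only when the Secret does not exist yet does Set fall back to Create, giving upsert semantics without a prior Get.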
+func (m *secretManifestCache) Set(ctx context.Context, key client.ObjectKey, spec DockerRegistrySpecManifest) error { + byteSpec, err := json.Marshal(&spec) + if err != nil { + return err + } + + secret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": byteSpec, + }, + } + + err = m.client.Update(ctx, &secret) + if !errors.IsNotFound(err) { + return err + } + + return m.client.Create(ctx, &secret) +} diff --git a/components/operator/internal/chart/cache_test.go b/components/operator/internal/chart/cache_test.go new file mode 100644 index 00000000..7b182399 --- /dev/null +++ b/components/operator/internal/chart/cache_test.go @@ -0,0 +1,262 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const testSecretNamespace = "kyma-system" + +func TestManifestCache_Delete(t *testing.T) { + t.Run("delete secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, emptySpecManifest), + ).Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.NoError(t, err) + + var secret corev1.Secret + err = client.Get(ctx, key, &secret) + require.True(t, errors.IsNotFound(err), fmt.Sprintf("got error: %v", err)) + }) + + t.Run("delete error", func(t *testing.T) { + scheme := runtime.NewScheme() + // apiextensionscheme does not contains v1.Secret scheme + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.Error(t, err) + }) + + t.Run("do nothing when cache is not found", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + + err := cache.Delete(ctx, key) + require.NoError(t, err) + }) +} + +func TestManifestCache_Get(t *testing.T) { + t.Run("get secret value", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, DockerRegistrySpecManifest{ + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + Manifest: "schmetterling", + }), + ).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.NoError(t, err) + + expectedResult := DockerRegistrySpecManifest{ + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + Manifest: "schmetterling", + } + require.Equal(t, expectedResult, result) + }) + + t.Run("client error", func(t *testing.T) { + scheme := runtime.NewScheme() + // apiextensionscheme does not contains v1.Secret 
scheme + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.Error(t, err) + require.Equal(t, emptySpecManifest, result) + }) + + t.Run("secret not found", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.NoError(t, err) + require.Equal(t, emptySpecManifest, result) + }) + + t.Run("conversion error", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": []byte("{UNEXPECTED}"), + }, + }).Build() + + cache := NewSecretManifestCache(client) + + result, err := cache.Get(ctx, key) + require.Error(t, err) + require.Equal(t, emptySpecManifest, result) + }) +} + +func TestManifestCache_Set(t *testing.T) { + t.Run("create secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + + cache := NewSecretManifestCache(client) + expectedSpec := DockerRegistrySpecManifest{ + Manifest: "schmetterling", + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + } + + err := cache.Set(ctx, key, expectedSpec) + require.NoError(t, err) + + var secret corev1.Secret + require.NoError(t, client.Get(ctx, key, &secret)) + + actualSpec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &actualSpec) + require.NoError(t, err) + + require.Equal(t, expectedSpec, actualSpec) + }) + + t.Run("update secret", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().WithRuntimeObjects( + fixSecretCache(t, key, emptySpecManifest), + ).Build() + + cache := NewSecretManifestCache(client) + expectedSpec := DockerRegistrySpecManifest{ + Manifest: "schmetterling", + CustomFlags: map[string]interface{}{ + "flag1": "val1", + "flag2": "val2", + }, + } + err := cache.Set(ctx, key, expectedSpec) + require.NoError(t, err) + + var secret corev1.Secret + require.NoError(t, client.Get(ctx, key, &secret)) + + actualSpec := DockerRegistrySpecManifest{} + err = json.Unmarshal(secret.Data["spec"], &actualSpec) + require.NoError(t, err) + + require.Equal(t, expectedSpec, actualSpec) + }) + + t.Run("marshal error", func(t *testing.T) { + key := types.NamespacedName{ + Name: "test-name", + Namespace: testSecretNamespace, + } + ctx := context.TODO() + client := fake.NewClientBuilder().Build() + wrongFlags := map[string]interface{}{ + "flag1": func() {}, + } + + cache := NewSecretManifestCache(client) + + err := cache.Set(ctx, key, DockerRegistrySpecManifest{ + Manifest: "", + CustomFlags: wrongFlags, + }) + require.Error(t, err) + }) +} + +func fixSecretCache(t *testing.T, key types.NamespacedName, spec DockerRegistrySpecManifest) *corev1.Secret { + byteSpec, err := json.Marshal(&spec) + 
require.NoError(t, err) + + return &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Data: map[string][]byte{ + "spec": byteSpec, + }, + } +} diff --git a/components/operator/internal/chart/chart.go b/components/operator/internal/chart/chart.go new file mode 100644 index 00000000..330d4d92 --- /dev/null +++ b/components/operator/internal/chart/chart.go @@ -0,0 +1,143 @@ +package chart + +import ( + "context" + "fmt" + "io" + "reflect" + "strings" + + "go.uber.org/zap" + "gopkg.in/yaml.v3" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Config struct { + Ctx context.Context + Log *zap.SugaredLogger + Cache ManifestCache + CacheKey types.NamespacedName + ManagerUID string + Cluster Cluster + Release Release +} + +type Release struct { + ChartPath string + Name string + Namespace string +} + +type Cluster struct { + Client client.Client + Config *rest.Config +} + +func parseManifest(manifest string) ([]unstructured.Unstructured, error) { + results := make([]unstructured.Unstructured, 0) + decoder := yaml.NewDecoder(strings.NewReader(manifest)) + + for { + var obj map[string]interface{} + err := decoder.Decode(&obj) + + if err == io.EOF { + break + } + + if err != nil { + return nil, err + } + + // no obj between separators + if len(obj) == 0 { + continue + } + + u := unstructured.Unstructured{Object: obj} + // some resources need to be applied first (before workloads) + // if this statement gets bigger then extract it to the separated place + if u.GetObjectKind().GroupVersionKind().Kind == "CustomResourceDefinition" || + u.GetObjectKind().GroupVersionKind().Kind == "PriorityClass" { + results = append([]unstructured.Unstructured{u}, results...) 
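+ // u is already queued at the front; skip the default append below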
+ continue + } + results = append(results, u) + } + + return results, nil +} + +func getCachedAndCurrentManifest(config *Config, customFlags map[string]interface{}, renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error)) (string, string, error) { + cachedSpecManifest, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return "", "", fmt.Errorf("could not get manifest from cache : %s", err.Error()) + } + + if !shouldRenderAgain(cachedSpecManifest, config, customFlags) { + return cachedSpecManifest.Manifest, cachedSpecManifest.Manifest, nil + } + + currentRelease, err := renderChartFunc(config, customFlags) + if err != nil { + return cachedSpecManifest.Manifest, "", fmt.Errorf("could not render manifest : %s", err.Error()) + } + + return cachedSpecManifest.Manifest, currentRelease.Manifest, nil +} + +func shouldRenderAgain(cachedSpec DockerRegistrySpecManifest, config *Config, customFlags map[string]interface{}) bool { + // cachedSpec is up-to-date only if flags used to render and manager is the same one who rendered it before + equalFlags := reflect.DeepEqual(cachedSpec.CustomFlags, customFlags) + return !(cachedSpec.ManagerUID == config.ManagerUID && equalFlags) +} + +func renderChart(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + chart, err := loader.Load(config.Release.ChartPath) + if err != nil { + return nil, fmt.Errorf("while loading chart from path '%s': %s", config.Release.ChartPath, err.Error()) + } + + installAction := newInstallAction(config) + + rel, err := installAction.Run(chart, customFlags) + if err != nil { + return nil, fmt.Errorf("while templating chart: %s", err.Error()) + } + + return rel, nil +} + +func newInstallAction(config *Config) *action.Install { + helmRESTGetter := &clientGetter{ + config: config.Cluster.Config, + } + + helmClient := kube.New(helmRESTGetter) + helmClient.Log = config.Log.Debugf + + actionConfig := new(action.Configuration) + actionConfig.KubeClient = helmClient + actionConfig.Log = helmClient.Log + + actionConfig.Releases = storage.Init(driver.NewMemory()) + actionConfig.RESTClientGetter = helmRESTGetter + + action := action.NewInstall(actionConfig) + action.ReleaseName = config.Release.Name + action.Namespace = config.Release.Namespace + action.Replace = true + action.IsUpgrade = true + action.DryRun = true + + return action +} diff --git a/components/operator/internal/chart/chart_test.go b/components/operator/internal/chart/chart_test.go new file mode 100644 index 00000000..0d155da6 --- /dev/null +++ b/components/operator/internal/chart/chart_test.go @@ -0,0 +1,94 @@ +package chart + +import ( + "context" + "testing" + + "helm.sh/helm/v3/pkg/release" + "k8s.io/apimachinery/pkg/types" +) + +func Test_getOrRenderManifestWithRenderer(t *testing.T) { + noCRDManifestKey := types.NamespacedName{ + Name: "no", Namespace: "crd", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), noCRDManifestKey, + DockerRegistrySpecManifest{Manifest: testDeploy}) + + type args struct { + config *Config + customFlags map[string]interface{} + renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error) + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "return manifest when flags and managerUID are not changed", + args: args{ + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + want: testDeploy, + 
wantErr: false, + }, + { + name: "render manifest when flags are changed", + args: args{ + renderChartFunc: fixManifestRenderFunc("test-new-manifest"), + customFlags: map[string]interface{}{ + "flag1": "val1", + }, + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + want: "test-new-manifest", + wantErr: false, + }, + { + name: "render manifest when managerUID is changed", + args: args{ + renderChartFunc: fixManifestRenderFunc("test-new-manifest-2"), + config: &Config{ + Ctx: context.Background(), + Cache: cache, + CacheKey: noCRDManifestKey, + ManagerUID: "new-UID", + }, + }, + want: "test-new-manifest-2", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, gotCurrent, err := getCachedAndCurrentManifest(tt.args.config, tt.args.customFlags, tt.args.renderChartFunc) + if (err != nil) != tt.wantErr { + t.Errorf("getCachedAndCurrentManifest() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotCurrent != tt.want { + t.Errorf("getCachedAndCurrentManifest() = %v, want %v", gotCurrent, tt.want) + } + }) + } +} + +func fixManifestRenderFunc(manifest string) func(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + return func(config *Config, customFlags map[string]interface{}) (*release.Release, error) { + return &release.Release{ + Manifest: manifest, + }, nil + } +} diff --git a/components/operator/internal/chart/check.go b/components/operator/internal/chart/check.go new file mode 100644 index 00000000..77436174 --- /dev/null +++ b/components/operator/internal/chart/check.go @@ -0,0 +1,94 @@ +package chart + +import ( + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func CheckCRDOrphanResources(config *Config) error { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + for _, obj := range objs { + // continue if obj is not crd + if !isCRD(obj) { + continue + } + + // check if crd exist on the cluster + objCopy := unstructured.Unstructured{Object: obj.Object} + err := config.Cluster.Client.Get(config.Ctx, types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }, &objCopy) + if errors.IsNotFound(err) { + continue + } + if err != nil { + return err + } + + // check if CRs exist on the cluster + crList, err := buildResourceListFromCRD(obj) + if err != nil { + return err + } + + err = config.Cluster.Client.List(config.Ctx, &crList) + if client.IgnoreNotFound(err) != nil { + return err + } + + if len(crList.Items) > 0 { + return fmt.Errorf("found %d items with VersionKind %s", len(crList.Items), crList.GetAPIVersion()) + } + } + + return nil +} + +func isCRD(u unstructured.Unstructured) bool { + return u.GroupVersionKind().GroupKind() == apiextensionsv1.Kind("CustomResourceDefinition") +} + +func buildResourceListFromCRD(u unstructured.Unstructured) (unstructured.UnstructuredList, error) { + crd := apiextensionsv1.CustomResourceDefinition{} + crdList := unstructured.UnstructuredList{} + + err := 
runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &crd) + if err != nil { + return crdList, err + } + + crdList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: crd.Spec.Group, + Version: getCRDStoredVersion(crd), + Kind: crd.Spec.Names.Kind, + }) + + return crdList, nil +} + +func getCRDStoredVersion(crd apiextensionsv1.CustomResourceDefinition) string { + for _, version := range crd.Spec.Versions { + if version.Storage { + return version.Name + } + } + + return "" +} diff --git a/components/operator/internal/chart/check_test.go b/components/operator/internal/chart/check_test.go new file mode 100644 index 00000000..6b0e10d9 --- /dev/null +++ b/components/operator/internal/chart/check_test.go @@ -0,0 +1,179 @@ +package chart + +import ( + "context" + "fmt" + "github.com/stretchr/testify/require" + "testing" + + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + testOrphanCR = ` +apiVersion: test.group/v1alpha2 +kind: TestKind +metadata: + name: test-deploy + namespace: default +` +) + +var ( + testOrphanObj = unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.group/v1alpha2", + "kind": "TestKind", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "namespace", + }, + }, + } +) + +func TestCheckCRDOrphanResources(t *testing.T) { + noCRDManifestKey := types.NamespacedName{ + Name: "no", Namespace: "crd", + } + noOrphanManifestKey := types.NamespacedName{ + Name: "no", Namespace: "orphan", + } + oneOrphanManifestKey := types.NamespacedName{ + Name: "one", Namespace: "orphan", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), noCRDManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testDeploy)}) + _ = cache.Set(context.Background(), noOrphanManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), oneOrphanManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testOrphanCR)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "no CRDs in manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: noCRDManifestKey, + }, + }, + wantErr: false, + }, + { + name: "no orphan for CRD", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: noOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). 
+ WithObjects(testCRDObj). + Build(), + }, + }, + }, + wantErr: false, + }, + { + name: "one orphan for CRD", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: oneOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: func() client.Client { + scheme := runtime.NewScheme() + scheme.AddKnownTypes(schema.GroupVersion{ + Group: "test.group", + Version: "v1alpha2", + }, &testOrphanObj) + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(&testOrphanObj). + WithObjects(testCRDObj). + Build() + return c + }(), + }, + }, + }, + wantErr: true, + }, + { + name: "missing CRD on cluster", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: oneOrphanManifestKey, + Ctx: context.Background(), + Cluster: Cluster{ + Client: func() client.Client { + scheme := runtime.NewScheme() + require.NoError(t, apiextensionsscheme.AddToScheme(scheme)) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + return c + }(), + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := CheckCRDOrphanResources(tt.args.config); (err != nil) != tt.wantErr { + t.Errorf("CheckCRDOrphanResources() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/components/operator/internal/chart/client_getter.go b/components/operator/internal/chart/client_getter.go new file mode 100644 index 00000000..969ae132 --- /dev/null +++ b/components/operator/internal/chart/client_getter.go @@ -0,0 +1,44 @@ +package chart + +import ( + "helm.sh/helm/v3/pkg/action" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" +) + +var _ action.RESTClientGetter = &clientGetter{} + +type clientGetter struct { + config *rest.Config +} + +func (cg *clientGetter) ToRESTConfig() (*rest.Config, error) { + return cg.config, nil +} + +func (cg *clientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + discoveryClient, _ := discovery.NewDiscoveryClientForConfig(cg.config) + return memory.NewMemCacheClient(discoveryClient), nil +} + +func (cg *clientGetter) ToRESTMapper() (meta.RESTMapper, error) { + discoveryClient, err := cg.ToDiscoveryClient() + if err != nil { + return nil, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, nil) + return expander, nil +} + +func (cg *clientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) +} diff --git a/components/operator/internal/chart/flags.go b/components/operator/internal/chart/flags.go new file mode 100644 index 00000000..02b6d9bc --- /dev/null +++ b/components/operator/internal/chart/flags.go @@ -0,0 +1,94 @@ +package chart + +import ( + "fmt" + "strings" +) + +type FlagsBuilder interface { + Build() map[string]interface{} + WithControllerConfiguration(healthzLivenessTimeout string) *flagsBuilder + WithRegistryCredentials(username string, password string) *flagsBuilder + WithRegistryHttpSecret(httpSecret string) *flagsBuilder + 
WithNodePort(nodePort int64) *flagsBuilder +} + +type flagsBuilder struct { + flags map[string]interface{} +} + +func NewFlagsBuilder() FlagsBuilder { + return &flagsBuilder{ + flags: map[string]interface{}{}, + } +} + +func (fb *flagsBuilder) Build() map[string]interface{} { + flags := map[string]interface{}{} + for key, value := range fb.flags { + flagPath := strings.Split(key, ".") + appendFlag(flags, flagPath, value) + } + return flags +} + +func appendFlag(flags map[string]interface{}, flagPath []string, value interface{}) { + currentFlag := flags + for i, pathPart := range flagPath { + createIfEmpty(currentFlag, pathPart) + if lastElement(flagPath, i) { + currentFlag[pathPart] = value + } else { + currentFlag = nextDeeperFlag(currentFlag, pathPart) + } + } +} + +func createIfEmpty(flags map[string]interface{}, key string) { + if _, ok := flags[key]; !ok { + flags[key] = map[string]interface{}{} + } +} + +func lastElement(values []string, i int) bool { + return i == len(values)-1 +} + +func nextDeeperFlag(currentFlag map[string]interface{}, path string) map[string]interface{} { + return currentFlag[path].(map[string]interface{}) +} + +func (fb *flagsBuilder) WithControllerConfiguration(healthzLivenessTimeout string) *flagsBuilder { + optionalFlags := []struct { + key string + value string + }{ + {"healthzLivenessTimeout", healthzLivenessTimeout}, + } + + for _, flag := range optionalFlags { + if flag.value != "" { + fullPath := fmt.Sprintf("containers.manager.configuration.data.%s", flag.key) + fb.flags[fullPath] = flag.value + } + } + + return fb +} + +func (fb *flagsBuilder) WithRegistryCredentials(username, password string) *flagsBuilder { + fb.flags["dockerRegistry.username"] = username + fb.flags["dockerRegistry.password"] = password + return fb +} + +func (fb *flagsBuilder) WithRegistryHttpSecret(httpSecret string) *flagsBuilder { + fb.flags["docker-registry.rollme"] = "dontrollplease" + fb.flags["docker-registry.registryHTTPSecret"] = httpSecret + return fb +} + +func (fb *flagsBuilder) WithNodePort(nodePort int64) *flagsBuilder { + fb.flags["global.registryNodePort"] = nodePort + return fb +} diff --git a/components/operator/internal/chart/flags_test.go b/components/operator/internal/chart/flags_test.go new file mode 100644 index 00000000..bb8f5599 --- /dev/null +++ b/components/operator/internal/chart/flags_test.go @@ -0,0 +1,85 @@ +package chart + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_flagsBuilder_Build(t *testing.T) { + t.Run("build empty flags", func(t *testing.T) { + flags := NewFlagsBuilder().Build() + require.Equal(t, map[string]interface{}{}, flags) + }) + + t.Run("build flags", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "containers": map[string]interface{}{ + "manager": map[string]interface{}{ + "configuration": map[string]interface{}{ + "data": map[string]interface{}{ + "healthzLivenessTimeout": "testHealthzLivenessTimeout", + }, + }, + }, + }, + "docker-registry": map[string]interface{}{ + "registryHTTPSecret": "testHttpSecret", + "rollme": "dontrollplease", + }, + "dockerRegistry": map[string]interface{}{ + "password": "testPassword", + "username": "testUsername", + }, + "global": map[string]interface{}{ + "registryNodePort": int64(1234), + }, + } + + flags := NewFlagsBuilder(). + WithNodePort(1234). + WithRegistryCredentials("testUsername", "testPassword"). + WithRegistryHttpSecret("testHttpSecret"). 
+ WithControllerConfiguration( + "testHealthzLivenessTimeout", + ).Build() + + require.Equal(t, expectedFlags, flags) + }) + + t.Run("build registry flags only", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "dockerRegistry": map[string]interface{}{ + "password": "testPassword", + "username": "testUsername", + }, + } + + flags := NewFlagsBuilder(). + WithRegistryCredentials("testUsername", "testPassword"). + Build() + + require.Equal(t, expectedFlags, flags) + }) + + t.Run("build not empty controller configuration flags only", func(t *testing.T) { + expectedFlags := map[string]interface{}{ + "containers": map[string]interface{}{ + "manager": map[string]interface{}{ + "configuration": map[string]interface{}{ + "data": map[string]interface{}{ + "healthzLivenessTimeout": "testHealthzLivenessTimeout", + }, + }, + }, + }, + } + + flags := NewFlagsBuilder(). + WithControllerConfiguration( + "testHealthzLivenessTimeout", + ).Build() + + require.Equal(t, expectedFlags, flags) + }) +} diff --git a/components/operator/internal/chart/install.go b/components/operator/internal/chart/install.go new file mode 100644 index 00000000..ee396dd4 --- /dev/null +++ b/components/operator/internal/chart/install.go @@ -0,0 +1,102 @@ +package chart + +import ( + "fmt" + + "github.com/kyma-project/docker-registry/components/operator/internal/annotation" + "github.com/pkg/errors" + "helm.sh/helm/v3/pkg/release" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func Install(config *Config, customFlags map[string]interface{}) error { + return install(config, customFlags, renderChart) +} + +func install(config *Config, customFlags map[string]interface{}, renderChartFunc func(config *Config, customFlags map[string]interface{}) (*release.Release, error)) error { + cachedManifest, currentManifest, err := getCachedAndCurrentManifest(config, customFlags, renderChartFunc) + if err != nil { + return err + } + + objs, unusedObjs, err := getObjectsToInstallAndRemove(cachedManifest, currentManifest) + if err != nil { + return err + } + + err = updateObjects(config, objs) + if err != nil { + return err + } + + err = uninstallObjects(config, unusedObjs) + if err != nil { + return err + } + + return config.Cache.Set(config.Ctx, config.CacheKey, DockerRegistrySpecManifest{ + ManagerUID: config.ManagerUID, + CustomFlags: customFlags, + Manifest: currentManifest, + }) +} + +func getObjectsToInstallAndRemove(cachedManifest string, currentManifest string) ([]unstructured.Unstructured, []unstructured.Unstructured, error) { + objs, err := parseManifest(currentManifest) + if err != nil { + return nil, nil, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + oldObjs, err := parseManifest(cachedManifest) + if err != nil { + return nil, nil, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + unusedObjs := unusedOldObjects(oldObjs, objs) + return objs, unusedObjs, nil +} + +func updateObjects(config *Config, objs []unstructured.Unstructured) error { + for i := range objs { + u := objs[i] + config.Log.Debugf("creating %s %s/%s", u.GetKind(), u.GetNamespace(), u.GetName()) + + u = annotation.AddDoNotEditDisclaimer(u) + if IsPVC(u.GroupVersionKind()) { + modifiedObj, err := AdjustDockerRegToClusterPVCSize(config.Ctx, config.Cluster.Client, u) + if err != nil { + return errors.Wrap(err, "while adjusting pvc size") + } + u = modifiedObj + } + + // TODO: what if Path returns error in the middle of manifest? 
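+ // (that is, when the client.Patch call in updateObjects fails partway through the object list)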
+ // maybe we should in this case translate applied objs into manifest and set it into cache? + err := config.Cluster.Client.Patch(config.Ctx, &u, client.Apply, &client.PatchOptions{ + Force: ptr.To[bool](true), + FieldManager: "dockerregistry-operator", + }) + if err != nil { + return fmt.Errorf("could not install object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + } + return nil +} + +func unusedOldObjects(previousObjs []unstructured.Unstructured, currentObjs []unstructured.Unstructured) []unstructured.Unstructured { + currentNames := make(map[string]struct{}, len(currentObjs)) + for _, obj := range currentObjs { + objFullName := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) + currentNames[objFullName] = struct{}{} + } + result := []unstructured.Unstructured{} + for _, obj := range previousObjs { + objFullName := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) + if _, found := currentNames[objFullName]; !found { + result = append(result, obj) + } + } + return result +} diff --git a/components/operator/internal/chart/install_test.go b/components/operator/internal/chart/install_test.go new file mode 100644 index 00000000..377def8a --- /dev/null +++ b/components/operator/internal/chart/install_test.go @@ -0,0 +1,247 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + separator = `---` + testCRD = ` +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: test-crd +spec: + group: test.group + names: + kind: TestKind + versions: + - storage: false + name: v1alpha1 + - storage: true + name: v1alpha2 +` + testDeploy = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy + namespace: default +` + testServiceAccount = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-service-account + namespace: test-namespace + labels: + label-key: 'label-val' +` +) + +var ( + testDeployCR = &appsv1.Deployment{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionStatus(v1.ConditionTrue), + }, + }, + }, + } + testCRDObj = &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-crd", + }, + } +) + +func Test_install_delete(t *testing.T) { + t.Run("should delete all unused resources", func(t *testing.T) { + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + client := fake.NewClientBuilder().WithObjects(testDeployCR).WithObjects(testCRDObj).Build() + customFlags := map[string]interface{}{ + "flag1": "val1", + } + config := &Config{ + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: client, + }, + Log: zap.NewNop().Sugar(), + } + err := install(config, 
customFlags, fixManifestRenderFunc("")) + require.NoError(t, err) + + deploymentList := appsv1.DeploymentList{} + err = client.List(context.Background(), &deploymentList) + require.NoError(t, err) + require.Empty(t, deploymentList.Items) + + crdList := apiextensionsv1.CustomResourceDefinitionList{} + err = client.List(context.Background(), &crdList) + require.NoError(t, err) + require.Empty(t, crdList.Items) + }) +} + +func Test_install(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + customFlags map[string]interface{} + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "installation error", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithScheme(apiextensionsscheme.Scheme).Build(), + }, + }, + }, + wantErr: true, + }, + // we can't simply test succeeded installation here because it uses + the Patch method which is not fully supported by the fake client.
This case is tested in controllers pkg + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Install(tt.args.config, tt.args.customFlags); (err != nil) != tt.wantErr { + t.Errorf("install() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_unusedOldObjects(t *testing.T) { + firstManifest := fmt.Sprint(testCRD, separator, testDeploy) + firstObjs, _ := parseManifest(firstManifest) + differentManifest := fmt.Sprint(testServiceAccount) + differentObjs, _ := parseManifest(differentManifest) + withCommonPartManifest := fmt.Sprint(testServiceAccount, separator, testDeploy) + withCommonPartObjs, _ := parseManifest(withCommonPartManifest) + firstWithoutCommonPartManifest := fmt.Sprint(testCRD) + firstWithoutCommonPartObjs, _ := parseManifest(firstWithoutCommonPartManifest) + + type args struct { + old []unstructured.Unstructured + new []unstructured.Unstructured + } + tests := []struct { + name string + args args + want []unstructured.Unstructured + }{ + { + name: "empty minus empty should be empty", + args: args{ + old: []unstructured.Unstructured{}, + new: []unstructured.Unstructured{}, + }, + want: []unstructured.Unstructured{}, + }, + { + name: "list minus empty should return the same list", + args: args{ + old: firstObjs, + new: []unstructured.Unstructured{}, + }, + want: firstObjs, + }, + { + name: "list minus list with different elements should return first list", + args: args{ + old: firstObjs, + new: differentObjs, + }, + want: firstObjs, + }, + { + name: "list minus list with common part should return first list without common part", + args: args{ + old: firstObjs, + new: withCommonPartObjs, + }, + want: firstWithoutCommonPartObjs, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := unusedOldObjects(tt.args.old, tt.args.new) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/components/operator/internal/chart/pvc.go b/components/operator/internal/chart/pvc.go new file mode 100644 index 00000000..f178c9d8 --- /dev/null +++ b/components/operator/internal/chart/pvc.go @@ -0,0 +1,64 @@ +package chart + +import ( + "context" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + dockerRegistryPVCName = "internal-docker-registry" + pvcKind = "PersistentVolumeClaim" + pvcVersion = "v1" + pvcGroup = "" +) + +func AdjustDockerRegToClusterPVCSize(ctx context.Context, c client.Client, obj unstructured.Unstructured) (unstructured.Unstructured, error) { + if obj.GetName() != dockerRegistryPVCName { + return obj, nil + } + clusterPVC := corev1.PersistentVolumeClaim{} + objKey := client.ObjectKey{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } + if err := c.Get(ctx, objKey, &clusterPVC); err != nil { + if k8serrors.IsNotFound(err) { + return obj, nil + } + return obj, errors.Wrap(err, "while getting pvc from cluster") + } + objPVC := corev1.PersistentVolumeClaim{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &objPVC); err != nil { + return obj, errors.Wrap(err, "while converting unstructured to pvc") + } + storage := clusterPVC.Spec.Resources.Requests.Storage() + if storage.Equal(*objPVC.Spec.Resources.Requests.Storage()) { + return obj, nil + } + objPVCcopy := objPVC.DeepCopy() + 
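// keep the size already provisioned in the cluster: a PVC's storage request can be expanded but never shrunk, so re-applying a smaller chart default would be rejected by the API server +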
objPVCcopy.Spec.Resources.Requests[corev1.ResourceStorage] = *clusterPVC.Spec.Resources.Requests.Storage() + + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(objPVCcopy) + if err != nil { + return obj, errors.Wrap(err, "while converting copied pvc object to unstructured") + } + + return unstructured.Unstructured{Object: out}, nil +} + +func IsPVC(objKind schema.GroupVersionKind) bool { + expected := schema.GroupVersionKind{ + Group: pvcGroup, + Version: pvcVersion, + Kind: pvcKind, + } + + return expected.Group == objKind.Group && expected.Kind == objKind.Kind && expected.Version == objKind.Version +} diff --git a/components/operator/internal/chart/pvc_test.go b/components/operator/internal/chart/pvc_test.go new file mode 100644 index 00000000..526d5a53 --- /dev/null +++ b/components/operator/internal/chart/pvc_test.go @@ -0,0 +1,133 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestAdjustToClusterSize(t *testing.T) { + testCases := map[string]struct { + rawPVCToInstall *corev1.PersistentVolumeClaim + clusterPVC []client.Object + expectedPVC *corev1.PersistentVolumeClaim + }{ + "pvc not exists in cluster": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + expectedPVC: fixPVC(dockerRegistryPVCName, 20), + }, + "pvc is not docker registry": { + rawPVCToInstall: fixPVC("random-pvc", 20), + expectedPVC: fixPVC("random-pvc", 20), + }, + "pvc exists with the same size": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + clusterPVC: []client.Object{fixPVC(dockerRegistryPVCName, 20)}, + expectedPVC: fixPVC(dockerRegistryPVCName, 20), + }, + "pvc exists with bigger size": { + rawPVCToInstall: fixPVC(dockerRegistryPVCName, 20), + clusterPVC: []client.Object{fixPVC(dockerRegistryPVCName, 30)}, + expectedPVC: fixPVC(dockerRegistryPVCName, 30), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + //GIVEN + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testCase.rawPVCToInstall) + require.NoError(t, err) + obj := unstructured.Unstructured{Object: out} + + c := fake.NewClientBuilder().WithObjects(testCase.clusterPVC...).Build() + + //WHEN + finalObj, err := AdjustDockerRegToClusterPVCSize(context.TODO(), c, obj) + + //THEN + require.NoError(t, err) + + expected, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testCase.expectedPVC) + + require.NoError(t, err) + require.EqualValues(t, expected, finalObj.Object) + }) + } +} + +func fixPVC(name string, size int) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "kyma-system", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(fmt.Sprintf("%dGi", size)), + }, + }, + }, + } +} + +func TestIsPVC(t *testing.T) { + testCases := map[string]struct { + input schema.GroupVersionKind + expected bool + }{ + "Equal": { + input: schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PersistentVolumeClaim", + }, + expected: true, + }, + "Different kind": { + 
input: schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }, + expected: false, + }, + "Different version": { + input: schema.GroupVersionKind{ + Group: "", + Version: "v2alpha1", + Kind: "PersistentVolumeClaim", + }, + expected: false, + }, + "Different group": { + input: schema.GroupVersionKind{ + Group: "networking", + Version: "v1", + Kind: "NetworkPolicy", + }, + expected: false, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + //GIVEN + + //WHEN + equal := IsPVC(testCase.input) + //THEN + require.Equal(t, testCase.expected, equal) + }) + } +} diff --git a/components/operator/internal/chart/uninstall.go b/components/operator/internal/chart/uninstall.go new file mode 100644 index 00000000..270694df --- /dev/null +++ b/components/operator/internal/chart/uninstall.go @@ -0,0 +1,160 @@ +package chart + +import ( + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FilterFunc func(unstructured.Unstructured) bool + +func Uninstall(config *Config, filterFunc ...FilterFunc) error { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + err2 := uninstallObjects(config, objs, filterFunc...) + if err2 != nil { + return err2 + } + + err3 := uninstallOrphanedResources(config) + if err3 != nil { + return err3 + } + + return config.Cache.Delete(config.Ctx, config.CacheKey) +} + +func uninstallObjects(config *Config, objs []unstructured.Unstructured, filterFunc ...FilterFunc) error { + for i := range objs { + u := objs[i] + if !fitToFilters(u, filterFunc...) { + continue + } + + config.Log.Debugf("deleting %s %s", u.GetKind(), u.GetName()) + err := config.Cluster.Client.Delete(config.Ctx, &u) + if k8serrors.IsNotFound(err) { + config.Log.Debugf("deletion skipped for %s %s", u.GetKind(), u.GetName()) + continue + } + if err != nil { + return fmt.Errorf("could not uninstall object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + } + return nil +} + +func UninstallSecrets(config *Config, filterFunc ...FilterFunc) (error, bool) { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return fmt.Errorf("could not render manifest from chart: %s", err.Error()), false + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return fmt.Errorf("could not parse chart manifest: %s", err.Error()), false + } + + err2, done := uninstallSecrets(config, objs, filterFunc...) + if err2 != nil { + return err2, false + } + + return nil, done +} + +func uninstallSecrets(config *Config, objs []unstructured.Unstructured, filterFunc ...FilterFunc) (error, bool) { + done := true + for i := range objs { + u := objs[i] + if !fitToFilters(u, filterFunc...) 
{ + continue + } + if u.GetKind() != "Secret" { + continue + } + + config.Log.Debugf("deleting %s %s", u.GetKind(), u.GetName()) + err := config.Cluster.Client.Delete(config.Ctx, &u) + if k8serrors.IsNotFound(err) { + config.Log.Debugf("deletion skipped for %s %s", u.GetKind(), u.GetName()) + continue + } + if err != nil { + return fmt.Errorf("could not uninstall object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()), false + } + done = false + } + return nil, done +} + +func WithoutCRDFilter(u unstructured.Unstructured) bool { + return !isCRD(u) +} + +func fitToFilters(u unstructured.Unstructured, filterFunc ...FilterFunc) bool { + for _, fn := range filterFunc { + if !fn(u) { + return false + } + } + + return true +} + +func uninstallOrphanedResources(config *Config) error { + //TODO: move this to finalizers logic in controller + var namespaces corev1.NamespaceList + if err := config.Cluster.Client.List(config.Ctx, &namespaces); err != nil { + return errors.Wrap(err, "couldn't get namespaces during Docker Registry uninstallation") + } + + if err := uninstallOrphanedConfigMaps(config, namespaces); err != nil { + return err + } + if err := uninstallOrphanedServiceAccounts(config, namespaces); err != nil { + return err + } + + return nil +} + +func uninstallOrphanedServiceAccounts(config *Config, namespaces corev1.NamespaceList) error { + for _, namespace := range namespaces.Items { + err := config.Cluster.Client.DeleteAllOf(config.Ctx, &corev1.ServiceAccount{}, + client.InNamespace(namespace.GetName()), + client.MatchingLabels{"dockerregistry.kyma-project.io/config": "service-account"}) + if err != nil { + return errors.Wrapf(err, + "couldn't delete ServiceAccount from namespace \"%s\" during DockerRegistry uninstallation", + namespace.GetName()) + } + } + return nil +} + +func uninstallOrphanedConfigMaps(config *Config, namespaces corev1.NamespaceList) error { + for _, namespace := range namespaces.Items { + err := config.Cluster.Client.DeleteAllOf(config.Ctx, &corev1.ConfigMap{}, + client.InNamespace(namespace.GetName()), + client.MatchingLabels{"dockerregistry.kyma-project.io/config": "runtime"}) + if err != nil { + return errors.Wrapf(err, + "couldn't delete ConfigMap from namespace \"%s\" during Docker Registry uninstallation", + namespace.GetName()) + } + } + return nil +} diff --git a/components/operator/internal/chart/uninstall_test.go b/components/operator/internal/chart/uninstall_test.go new file mode 100644 index 00000000..17b3c827 --- /dev/null +++ b/components/operator/internal/chart/uninstall_test.go @@ -0,0 +1,99 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_Uninstall(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: ""}) + _ = cache.Set(context.Background(), wrongManifestKey, + 
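// the tab character below makes this manifest invalid YAML, so parseManifest is expected to fail for this key +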
DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}} + + type args struct { + config *Config + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + }, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + wantErr: true, + }, + { + name: "installation error", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := Uninstall(tt.args.config); (err != nil) != tt.wantErr { + t.Errorf("uninstall() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/components/operator/internal/chart/verify.go b/components/operator/internal/chart/verify.go new file mode 100644 index 00000000..bf92f7c4 --- /dev/null +++ b/components/operator/internal/chart/verify.go @@ -0,0 +1,72 @@ +package chart + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" +) + +func Verify(config *Config) (bool, error) { + spec, err := config.Cache.Get(config.Ctx, config.CacheKey) + if err != nil { + return false, fmt.Errorf("could not render manifest from chart: %s", err.Error()) + } + // sometimes cache is not created yet + if len(spec.Manifest) == 0 { + return false, nil + } + + objs, err := parseManifest(spec.Manifest) + if err != nil { + return false, fmt.Errorf("could not parse chart manifest: %s", err.Error()) + } + + for i := range objs { + u := objs[i] + + var verifyFunc verifyFunc + switch u.GetKind() { + case "Deployment": + verifyFunc = verifyDeployment + case "DaemonSet": + // TODO: right now we don't support internal docker registry + continue + default: + continue + } + + ready, err := verifyFunc(config, u) + if err != nil { + return false, fmt.Errorf("could not verify object %s/%s: %s", u.GetNamespace(), u.GetName(), err.Error()) + } + + if !ready { + return false, nil + } + } + + return true, nil +} + +type verifyFunc func(*Config, unstructured.Unstructured) (bool, error) + +func verifyDeployment(config *Config, u unstructured.Unstructured) (bool, error) { + var deployment appsv1.Deployment + err := config.Cluster.Client.Get(config.Ctx, types.NamespacedName{ + Name: u.GetName(), + Namespace: u.GetNamespace(), + }, &deployment) + if err != nil { + return false, err + } + + for _, cond := range deployment.Status.Conditions { + if cond.Type == appsv1.DeploymentAvailable && cond.Status == v1.ConditionTrue { + return true, nil + } + } + + return false, nil +} diff --git a/components/operator/internal/chart/verify_test.go b/components/operator/internal/chart/verify_test.go new file mode 100644 index 00000000..bd54fd37 --- /dev/null +++ b/components/operator/internal/chart/verify_test.go @@ -0,0 +1,146 @@ +package chart + +import ( + "context" + "fmt" + "testing" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + v1
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testDeployNotReadyCR = &appsv1.Deployment{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionStatus(v1.ConditionFalse), + }, + }, + }, + } +) + +func Test_verify(t *testing.T) { + log := zap.NewNop().Sugar() + + testManifestKey := types.NamespacedName{ + Name: "test", Namespace: "testnamespace", + } + emptyManifestKey := types.NamespacedName{ + Name: "empty", Namespace: "manifest", + } + wrongManifestKey := types.NamespacedName{ + Name: "wrong", Namespace: "manifest", + } + + cache := NewInMemoryManifestCache() + _ = cache.Set(context.Background(), testManifestKey, + DockerRegistrySpecManifest{Manifest: fmt.Sprint(testCRD, separator, testDeploy)}) + _ = cache.Set(context.Background(), emptyManifestKey, + DockerRegistrySpecManifest{Manifest: "---"}) + _ = cache.Set(context.Background(), wrongManifestKey, + DockerRegistrySpecManifest{Manifest: "api: test\n\tversion: test"}) + + type args struct { + config *Config + } + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "empty manifest", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: emptyManifestKey, + }, + }, + want: true, + wantErr: false, + }, + { + name: "parse manifest error", + args: args{ + config: &Config{ + Cache: cache, + CacheKey: wrongManifestKey, + }, + }, + want: false, + wantErr: true, + }, + { + name: "verify", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithObjects(testDeployCR).Build(), + }, + }, + }, + want: true, + wantErr: false, + }, + { + name: "obj not ready", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().WithObjects(testDeployNotReadyCR).Build(), + }, + }, + }, + want: false, + wantErr: false, + }, + { + name: "obj not found", + args: args{ + config: &Config{ + Ctx: context.Background(), + Log: log, + Cache: cache, + CacheKey: testManifestKey, + Cluster: Cluster{ + Client: fake.NewClientBuilder().Build(), + }, + }, + }, + want: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := Verify(tt.args.config) + if (err != nil) != tt.wantErr { + t.Errorf("verify() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("verify() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/components/operator/internal/config/config.go b/components/operator/internal/config/config.go new file mode 100644 index 00000000..f298d373 --- /dev/null +++ b/components/operator/internal/config/config.go @@ -0,0 +1,14 @@ +package config + +import "github.com/vrischmann/envconfig" + +type Config struct { + ChartPath string `envconfig:"default=/module-chart"` +} + +func GetConfig(prefix string) (Config, error) { + cfg := Config{} + err := envconfig.InitWithPrefix(&cfg, prefix) + return cfg, err + +} diff --git a/components/operator/internal/controllers/kubernetes/configmap_service.go b/components/operator/internal/controllers/kubernetes/configmap_service.go new file mode 100644 index 00000000..36acf569 --- /dev/null +++ 
b/components/operator/internal/controllers/kubernetes/configmap_service.go @@ -0,0 +1,98 @@ +package kubernetes + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +type ConfigMapService interface { + IsBase(configMap *corev1.ConfigMap) bool + ListBase(ctx context.Context) ([]corev1.ConfigMap, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error +} + +var _ ConfigMapService = &configMapService{} + +type configMapService struct { + client resource.Client + config Config +} + +func NewConfigMapService(client resource.Client, config Config) ConfigMapService { + return &configMapService{ + client: client, + config: config, + } +} + +func (r *configMapService) ListBase(ctx context.Context) ([]corev1.ConfigMap, error) { + configMaps := corev1.ConfigMapList{} + if err := r.client.ListByLabel(ctx, r.config.BaseNamespace, map[string]string{ConfigLabel: RuntimeLabelValue}, &configMaps); err != nil { + return nil, err + } + + return configMaps.Items, nil +} + +func (r *configMapService) IsBase(configMap *corev1.ConfigMap) bool { + return configMap.Namespace == r.config.BaseNamespace && configMap.Labels[ConfigLabel] == RuntimeLabelValue +} + +func (r *configMapService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error { + logger.Debug(fmt.Sprintf("Updating ConfigMap '%s/%s'", namespace, baseInstance.GetName())) + instance := &corev1.ConfigMap{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, instance); err != nil { + if errors.IsNotFound(err) { + return r.createConfigMap(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing ConfigMap '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + + return r.updateConfigMap(ctx, logger, instance, baseInstance) +} + +func (r *configMapService) createConfigMap(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ConfigMap) error { + configMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Data: baseInstance.Data, + BinaryData: baseInstance.BinaryData, + } + + logger.Debug(fmt.Sprintf("Creating ConfigMap '%s/%s'", configMap.GetNamespace(), configMap.GetName())) + if err := r.client.Create(ctx, &configMap); err != nil { + logger.Error(err, fmt.Sprintf("Creating ConfigMap '%s/%s' failed", configMap.GetNamespace(), configMap.GetName())) + return err + } + + return nil +} + +func (r *configMapService) updateConfigMap(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.ConfigMap) error { + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.Data = baseInstance.Data + copy.BinaryData = baseInstance.BinaryData + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating ConfigMap '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} diff --git a/components/operator/internal/controllers/kubernetes/namespace_controller.go 
b/components/operator/internal/controllers/kubernetes/namespace_controller.go new file mode 100644 index 00000000..2a53941f --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/namespace_controller.go @@ -0,0 +1,116 @@ +package kubernetes + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +type NamespaceReconciler struct { + Log *zap.SugaredLogger + client client.Client + config Config + configMapSvc ConfigMapService + secretSvc SecretService + serviceAccountSvc ServiceAccountService +} + +func NewNamespace(client client.Client, log *zap.SugaredLogger, config Config, + configMapSvc ConfigMapService, secretSvc SecretService, serviceAccountSvc ServiceAccountService) *NamespaceReconciler { + return &NamespaceReconciler{ + client: client, + Log: log, + config: config, + configMapSvc: configMapSvc, + secretSvc: secretSvc, + serviceAccountSvc: serviceAccountSvc, + } +} + +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("namespace-controller"). + For(&corev1.Namespace{}). + WithEventFilter(r.predicate()). + Complete(r) +} + +func (r *NamespaceReconciler) predicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + namespace, ok := e.Object.(*corev1.Namespace) + if !ok { + return false + } + return !isExcludedNamespace(namespace.Name, r.config.BaseNamespace, r.config.ExcludedNamespaces) + }, + GenericFunc: func(genericEvent event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } +} + +// Reconcile reads the state of the cluster for a Namespace object and updates other resources based on it +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=configmaps;secrets;serviceaccounts,verbs=get;list;watch;create;update;patch;delete + +func (r *NamespaceReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + instance := &corev1.Namespace{} + if err := r.client.Get(ctx, request.NamespacedName, instance); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + logger := r.Log.With("name", instance.GetName()) + + logger.Debug(fmt.Sprintf("Updating ConfigMaps in namespace '%s'", instance.GetName())) + configMaps, err := r.configMapSvc.ListBase(ctx) + if err != nil { + logger.Error(err, "Listing base ConfigMaps failed") + return ctrl.Result{}, err + } + for _, configMap := range configMaps { + c := configMap + if err := r.configMapSvc.UpdateNamespace(ctx, logger, instance.GetName(), &c); err != nil { + return ctrl.Result{}, err + } + } + + logger.Debug(fmt.Sprintf("Updating Secret in namespace '%s'", instance.GetName())) + secret, err := r.secretSvc.GetBase(ctx) + if err != nil { + logger.Error(err, "Listing base Secrets failed") + return ctrl.Result{}, err + } + + if err := r.secretSvc.UpdateNamespace(ctx, logger, instance.GetName(), secret); err != nil { + return ctrl.Result{}, err + } + + logger.Debug(fmt.Sprintf("Updating ServiceAccounts in namespace '%s'", instance.GetName())) + serviceAccounts, err := r.serviceAccountSvc.ListBase(ctx) + if err != nil { + logger.Error(err, "Listing base ServiceAccounts failed") +
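// returning a non-nil error makes controller-runtime requeue this namespace with backoff +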
return ctrl.Result{}, err + } + for _, serviceAccount := range serviceAccounts { + sa := serviceAccount + if err := r.serviceAccountSvc.UpdateNamespace(ctx, logger, instance.GetName(), &sa); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} diff --git a/components/operator/internal/controllers/kubernetes/secret_controller.go b/components/operator/internal/controllers/kubernetes/secret_controller.go new file mode 100644 index 00000000..006eddfd --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/secret_controller.go @@ -0,0 +1,103 @@ +package kubernetes + +import ( + "context" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +type SecretReconciler struct { + Log *zap.SugaredLogger + client client.Client + config Config + svc SecretService +} + +func NewSecret(client client.Client, log *zap.SugaredLogger, config Config, secretSvc SecretService) *SecretReconciler { + return &SecretReconciler{ + client: client, + Log: log, + config: config, + svc: secretSvc, + } +} + +func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("secret-controller"). + For(&corev1.Secret{}). + WithEventFilter(r.predicate()). + Complete(r) +} + +func (r *SecretReconciler) predicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + runtime, ok := e.ObjectNew.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + GenericFunc: func(e event.GenericEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + runtime, ok := e.Object.(*corev1.Secret) + if !ok { + return false + } + return r.svc.IsBase(runtime) + }, + } +} + +// Reconcile reads the state of the cluster for a Secret object and makes changes based on the state read +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch + +func (r *SecretReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + instance := &corev1.Secret{} + if err := r.client.Get(ctx, request.NamespacedName, instance); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + logger := r.Log.With("namespace", instance.GetNamespace(), "name", instance.GetName()) + + namespaces, err := getNamespaces(ctx, r.client, r.config.BaseNamespace, r.config.ExcludedNamespaces) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.svc.HandleFinalizer(ctx, logger, instance, namespaces); err != nil { + return ctrl.Result{}, err + } + if !instance.ObjectMeta.DeletionTimestamp.IsZero() { + return ctrl.Result{}, nil + } + + for _, namespace := range namespaces { + if err = r.svc.UpdateNamespace(ctx, logger, namespace, instance); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{RequeueAfter: r.config.SecretRequeueDuration}, nil +} diff --git a/components/operator/internal/controllers/kubernetes/secret_service.go b/components/operator/internal/controllers/kubernetes/secret_service.go new file
mode 100644 index 00000000..af4e347d --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/secret_service.go @@ -0,0 +1,175 @@ +package kubernetes + +import ( + "context" + "fmt" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +const ( + FunctionManagedByLabel = "dockerregistry.kyma-project.io/managed-by" + cfgSecretFinalizerName = "dockerregistry.kyma-project.io/finalizer-registry-config" + FunctionResourceLabelUserValue = "user" +) + +type SecretService interface { + IsBase(secret *corev1.Secret) bool + GetBase(ctx context.Context) (*corev1.Secret, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error + HandleFinalizer(ctx context.Context, logger *zap.SugaredLogger, secret *corev1.Secret, namespaces []string) error +} + +var _ SecretService = &secretService{} + +type secretService struct { + client resource.Client + config Config +} + +func NewSecretService(client resource.Client, config Config) SecretService { + return &secretService{ + client: client, + config: config, + } +} + +func (r *secretService) GetBase(ctx context.Context) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err := r.client.Get(ctx, types.NamespacedName{ + Namespace: r.config.BaseNamespace, + Name: r.config.BaseDefaultSecretName, + }, secret) + + return secret, err +} + +func (r *secretService) IsBase(secret *corev1.Secret) bool { + return secret.Namespace == r.config.BaseNamespace && + secret.Name == r.config.BaseDefaultSecretName && + secret.Labels[ConfigLabel] == CredentialsLabelValue +} + +func (r *secretService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error { + logger.Debug(fmt.Sprintf("Updating Secret '%s/%s'", namespace, baseInstance.GetName())) + instance := &corev1.Secret{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, instance); err != nil { + if errors.IsNotFound(err) { + return r.createSecret(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing Secret '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + if instance.Labels[FunctionManagedByLabel] == FunctionResourceLabelUserValue { + return nil + } + return r.updateSecret(ctx, logger, instance, baseInstance) +} + +func (r *secretService) HandleFinalizer(ctx context.Context, logger *zap.SugaredLogger, instance *corev1.Secret, namespaces []string) error { + if instance.ObjectMeta.DeletionTimestamp.IsZero() { + if containsString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) { + return nil + } + instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) + if err := r.client.Update(context.Background(), instance); err != nil { + return err + } + } else { + if !containsString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) { + return nil + } + for _, namespace := range namespaces { + logger.Debug(fmt.Sprintf("Deleting Secret '%s/%s'", namespace, instance.Name)) + if err := r.deleteSecret(ctx, logger, namespace, instance.Name); err != nil { + return err + } + } + instance.ObjectMeta.Finalizers = removeString(instance.ObjectMeta.Finalizers, cfgSecretFinalizerName) + if err := 
r.client.Update(context.Background(), instance); err != nil { + return err + } + } + return nil +} + +func (r *secretService) createSecret(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.Secret) error { + secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Data: baseInstance.Data, + StringData: baseInstance.StringData, + Type: baseInstance.Type, + } + + logger.Debug(fmt.Sprintf("Creating Secret '%s/%s'", secret.GetNamespace(), secret.GetName())) + if err := r.client.Create(ctx, &secret); err != nil { + logger.Error(err, fmt.Sprintf("Creating Secret '%s/%s' failed", secret.GetNamespace(), secret.GetName())) + return err + } + + return nil +} + +func (r *secretService) updateSecret(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.Secret) error { + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.Data = baseInstance.Data + copy.StringData = baseInstance.StringData + copy.Type = baseInstance.Type + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating Secret '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} + +func (r *secretService) deleteSecret(ctx context.Context, logger *zap.SugaredLogger, namespace, baseInstanceName string) error { + instance := &corev1.Secret{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstanceName}, instance); err != nil { + return client.IgnoreNotFound(err) + } + if instance.Labels[FunctionManagedByLabel] == FunctionResourceLabelUserValue { + return nil + } + if err := r.client.Delete(ctx, instance); err != nil { + logger.Error(err, fmt.Sprintf("Deleting Secret '%s/%s' failed", namespace, baseInstanceName)) + return err + } + + return nil +} + +// Helper functions to check and remove string from a slice of strings. 
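The two string helpers below are hand-rolled; controller-runtime also ships finalizer utilities that cover the same bookkeeping. A minimal sketch of the add-path using those utilities, assuming the vendored controller-runtime version exports controllerutil.ContainsFinalizer and controllerutil.AddFinalizer (the helper name ensureFinalizer is illustrative, not part of this PR):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureFinalizer adds the config-secret finalizer if it is missing and
// persists the change; it is a no-op when the finalizer is already present.
func ensureFinalizer(ctx context.Context, c client.Client, secret *corev1.Secret, finalizer string) error {
	if controllerutil.ContainsFinalizer(secret, finalizer) {
		return nil
	}
	controllerutil.AddFinalizer(secret, finalizer) // mutates secret.Finalizers in place
	return c.Update(ctx, secret)
}

Leaning on the shared helpers could eventually replace the containsString/removeString functions below, at the cost of a controller-runtime version dependency.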
+func containsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +func removeString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return +} diff --git a/components/operator/internal/controllers/kubernetes/serviceaccount_service.go b/components/operator/internal/controllers/kubernetes/serviceaccount_service.go new file mode 100644 index 00000000..de7e143f --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/serviceaccount_service.go @@ -0,0 +1,130 @@ +package kubernetes + +import ( + "context" + "fmt" + "strings" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kyma-project/docker-registry/components/operator/internal/resource" +) + +type ServiceAccountService interface { + IsBase(serviceAccount *corev1.ServiceAccount) bool + ListBase(ctx context.Context) ([]corev1.ServiceAccount, error) + UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error +} + +type serviceAccountService struct { + client resource.Client + config Config +} + +func NewServiceAccountService(client resource.Client, config Config) ServiceAccountService { + return &serviceAccountService{ + client: client, + config: config, + } +} + +func (r *serviceAccountService) ListBase(ctx context.Context) ([]corev1.ServiceAccount, error) { + serviceAccounts := &corev1.ServiceAccountList{} + if err := r.client.ListByLabel(ctx, r.config.BaseNamespace, map[string]string{ConfigLabel: ServiceAccountLabelValue}, serviceAccounts); err != nil { + return nil, err + } + + return serviceAccounts.Items, nil +} + +func (r *serviceAccountService) IsBase(serviceAccount *corev1.ServiceAccount) bool { + return serviceAccount.Namespace == r.config.BaseNamespace && serviceAccount.Labels[ConfigLabel] == ServiceAccountLabelValue +} + +func (r *serviceAccountService) UpdateNamespace(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error { + logger.Debug(fmt.Sprintf("Updating ServiceAccount '%s/%s'", namespace, baseInstance.GetName())) + serviceAccount := &corev1.ServiceAccount{} + if err := r.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: baseInstance.GetName()}, serviceAccount); err != nil { + if errors.IsNotFound(err) { + return r.createServiceAccount(ctx, logger, namespace, baseInstance) + } + logger.Error(err, fmt.Sprintf("Gathering existing ServiceAccount '%s/%s' failed", namespace, baseInstance.GetName())) + return err + } + + return r.updateServiceAccount(ctx, logger, serviceAccount, baseInstance) +} + +func (r *serviceAccountService) createServiceAccount(ctx context.Context, logger *zap.SugaredLogger, namespace string, baseInstance *corev1.ServiceAccount) error { + secrets := r.shiftSecretTokens(baseInstance) + serviceAccount := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: baseInstance.GetName(), + Namespace: namespace, + Labels: baseInstance.Labels, + Annotations: baseInstance.Annotations, + }, + Secrets: secrets, + ImagePullSecrets: baseInstance.ImagePullSecrets, + AutomountServiceAccountToken: baseInstance.AutomountServiceAccountToken, + } + + logger.Debug(fmt.Sprintf("Creating ServiceAccount '%s/%s'", serviceAccount.GetNamespace(), 
serviceAccount.GetName())) + if err := r.client.Create(ctx, &serviceAccount); err != nil { + logger.Error(err, fmt.Sprintf("Creating ServiceAccount '%s/%s'", serviceAccount.GetNamespace(), serviceAccount.GetName())) + return err + } + + return nil +} + +func (r *serviceAccountService) updateServiceAccount(ctx context.Context, logger *zap.SugaredLogger, instance, baseInstance *corev1.ServiceAccount) error { + tokens := r.extractSecretTokens(instance) + secrets := r.shiftSecretTokens(baseInstance) + secrets = append(secrets, tokens...) + + copy := instance.DeepCopy() + copy.Annotations = baseInstance.GetAnnotations() + copy.Labels = baseInstance.GetLabels() + copy.ImagePullSecrets = baseInstance.ImagePullSecrets + copy.AutomountServiceAccountToken = baseInstance.AutomountServiceAccountToken + copy.Secrets = secrets + + if err := r.client.Update(ctx, copy); err != nil { + logger.Error(err, fmt.Sprintf("Updating ServiceAccount '%s/%s' failed", copy.GetNamespace(), copy.GetName())) + return err + } + + return nil +} + +func (*serviceAccountService) shiftSecretTokens(baseInstance *corev1.ServiceAccount) []corev1.ObjectReference { + prefix := fmt.Sprintf("%s-token", baseInstance.Name) + + secrets := make([]corev1.ObjectReference, 0) + for _, secret := range baseInstance.Secrets { + if !strings.HasPrefix(secret.Name, prefix) { + secrets = append(secrets, secret) + } + } + + return secrets +} + +func (*serviceAccountService) extractSecretTokens(serviceAccount *corev1.ServiceAccount) []corev1.ObjectReference { + prefix := fmt.Sprintf("%s-token", serviceAccount.Name) + + secrets := make([]corev1.ObjectReference, 0) + for _, secret := range serviceAccount.Secrets { + if strings.HasPrefix(secret.Name, prefix) { + secrets = append(secrets, secret) + } + } + + return secrets +} diff --git a/components/operator/internal/controllers/kubernetes/shared.go b/components/operator/internal/controllers/kubernetes/shared.go new file mode 100644 index 00000000..10eb59ad --- /dev/null +++ b/components/operator/internal/controllers/kubernetes/shared.go @@ -0,0 +1,55 @@ +package kubernetes + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ConfigLabel = "dockerregistry.kyma-project.io/config" + CredentialsLabelValue = "credentials" + ServiceAccountLabelValue = "service-account" + RuntimeLabelValue = "runtime" +) + +type Config struct { + BaseNamespace string `envconfig:"default=kyma-system"` + BaseDefaultSecretName string `envconfig:"default=internal-dockerregistry-config"` + ExcludedNamespaces []string `envconfig:"default=kyma-system"` + ConfigMapRequeueDuration time.Duration `envconfig:"default=1m"` + SecretRequeueDuration time.Duration `envconfig:"default=1m"` + ServiceAccountRequeueDuration time.Duration `envconfig:"default=1m"` +} + +func getNamespaces(ctx context.Context, client client.Client, base string, excluded []string) ([]string, error) { + var namespaces corev1.NamespaceList + if err := client.List(ctx, &namespaces); err != nil { + return nil, err + } + + names := make([]string, 0) + for _, namespace := range namespaces.Items { + if !isExcludedNamespace(namespace.GetName(), base, excluded) && namespace.Status.Phase != corev1.NamespaceTerminating { + names = append(names, namespace.GetName()) + } + } + + return names, nil +} + +func isExcludedNamespace(name, base string, excluded []string) bool { + if name == base { + return true + } + + for _, namespace := range excluded { + if name == namespace { + return true + } + } + + return 
false +} diff --git a/components/operator/internal/gitrepository/cleanup.go b/components/operator/internal/gitrepository/cleanup.go new file mode 100644 index 00000000..6a41405a --- /dev/null +++ b/components/operator/internal/gitrepository/cleanup.go @@ -0,0 +1,31 @@ +package gitrepository + +import ( + "context" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + gitRepoCRDName = "gitrepositories.dockerregistry.kyma-project.io" +) + +// Cleanup removes gitrepository CRD and its resources +func Cleanup(ctx context.Context, c client.Client) error { + crd, err := getCRD(ctx, c) + if err != nil { + return client.IgnoreNotFound(err) + } + + return c.Delete(ctx, crd, &client.DeleteOptions{}) +} + +func getCRD(ctx context.Context, client client.Client) (*apiextensionsv1.CustomResourceDefinition, error) { + var crd apiextensionsv1.CustomResourceDefinition + err := client.Get(ctx, types.NamespacedName{ + Name: gitRepoCRDName, + }, &crd) + return &crd, err +} diff --git a/components/operator/internal/gitrepository/cleanup_test.go b/components/operator/internal/gitrepository/cleanup_test.go new file mode 100644 index 00000000..3fa1ab7d --- /dev/null +++ b/components/operator/internal/gitrepository/cleanup_test.go @@ -0,0 +1,61 @@ +package gitrepository + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestCleanup(t *testing.T) { + t.Run("remove crd", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). + WithObjects(fixGitRepoCRD()). + Build() + + err := Cleanup(ctx, c) + + require.NoError(t, err) + + err = c.Get(ctx, types.NamespacedName{ + Name: gitRepoCRDName, + }, fixGitRepoCRD()) + require.True(t, errors.IsNotFound(err)) + }) + + t.Run("crd not found", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder(). + WithScheme(apiextensionsscheme.Scheme). 
+ Build() + + err := Cleanup(ctx, c) + + require.NoError(t, err) + }) + + t.Run("client get error", func(t *testing.T) { + ctx := context.Background() + c := fake.NewClientBuilder().Build() + + err := Cleanup(ctx, c) + + require.Error(t, err) + }) +} + +func fixGitRepoCRD() *apiextensionsv1.CustomResourceDefinition { + return &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{ + Name: gitRepoCRDName, + }, + } +} diff --git a/components/operator/internal/predicate/predicate.go b/components/operator/internal/predicate/predicate.go new file mode 100644 index 00000000..21b0d09c --- /dev/null +++ b/components/operator/internal/predicate/predicate.go @@ -0,0 +1,33 @@ +package predicate + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// this predicate filters out status-only changes so the controller does not react to them +type NoStatusChangePredicate struct { + predicate.Funcs +} + +func (p NoStatusChangePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectNew == nil || e.ObjectOld == nil { + return false + } + + // first resource version (after apply) + if e.ObjectOld.GetResourceVersion() == e.ObjectNew.GetResourceVersion() { + return true + } + + return !isStatusUpdate(e) +} + +func isStatusUpdate(e event.UpdateEvent) bool { + if e.ObjectOld.GetGeneration() == e.ObjectNew.GetGeneration() && + e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() { + return true + } + + return false +} diff --git a/components/operator/internal/predicate/predicate_test.go b/components/operator/internal/predicate/predicate_test.go new file mode 100644 index 00000000..e9119b88 --- /dev/null +++ b/components/operator/internal/predicate/predicate_test.go @@ -0,0 +1,98 @@ +package predicate + +import ( + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestNoStatusChangePredicate_Update(t *testing.T) { + type args struct { + e event.UpdateEvent + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "nil objs", + args: args{ + e: event.UpdateEvent{ + ObjectOld: nil, + ObjectNew: nil, + }, + }, + want: false, + }, + { + name: "first obj iteration", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + }, + }, + want: true, + }, + { + name: "status update", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("600") + return u + }(), + }, + }, + want: false, + }, + { + name: "spec update", + args: args{ + e: event.UpdateEvent{ + ObjectOld: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(1) + u.SetResourceVersion("560") + return u + }(), + ObjectNew: func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetGeneration(2) + u.SetResourceVersion("600") + return u + }(), + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := NoStatusChangePredicate{} + if got := p.Update(tt.args.e); got !=
tt.want { + t.Errorf("NoStatusChangePredicate.Update() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/components/operator/internal/registry/node_port.go b/components/operator/internal/registry/node_port.go new file mode 100644 index 00000000..f7c84e4e --- /dev/null +++ b/components/operator/internal/registry/node_port.go @@ -0,0 +1,158 @@ +package registry + +import ( + "context" + "fmt" + "math/rand" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + dockerRegistryNodePort = 32_137 + + // Available ports according to documentation https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + maxNodePort = 32_767 + minNodePort = 30_000 +) + +const ( + dockerRegistryService = "internal-docker-registry" + dockerRegistryPortName = "http-registry" + + allNamespaces = "" +) + +type nodePortFinder func() int32 + +type NodePortResolver struct { + nodePortFinder +} + +func NewNodePortResolver(finder nodePortFinder) *NodePortResolver { + return &NodePortResolver{nodePortFinder: finder} +} + +func (npr *NodePortResolver) ResolveDockerRegistryNodePortFn(ctx context.Context, k8sClient client.Client, namespace string) (int32, error) { + svc, err := getService(ctx, k8sClient, namespace, dockerRegistryService) + if err != nil { + return 0, errors.Wrap(err, fmt.Sprintf("while checking if %s service is installed on cluster", dockerRegistryService)) + } + + if svc != nil && svc.Spec.Type == corev1.ServiceTypeNodePort { + if isDefaultNodePortValue(svc) { + return dockerRegistryNodePort, nil + } + currentNodePort := getNodePort(svc) + return currentNodePort, nil + } + + svcs, err := getAllNodePortServices(ctx, k8sClient) + if err != nil { + return 0, errors.Wrap(err, "while fetching all services from cluster") + } + + if possibleConflict(svcs) { + newPort, err := npr.drawEmptyPortNumber(svcs) + if err != nil { + return 0, errors.Wrap(err, "while drawing available port number") + } + return newPort, nil + } + return dockerRegistryNodePort, nil +} + +func (npr *NodePortResolver) drawEmptyPortNumber(svcs *corev1.ServiceList) (int32, error) { + nodePorts := map[int32]struct{}{} + for _, svc := range svcs.Items { + for _, port := range svc.Spec.Ports { + nodePorts[port.NodePort] = struct{}{} + } + } + + retries := 100 + var emptyPort int32 + for i := 0; i < retries; i++ { + possibleEmptyPort := npr.nodePortFinder() + if _, ok := nodePorts[possibleEmptyPort]; !ok { + emptyPort = possibleEmptyPort + break + } + } + if emptyPort == 0 { + return 0, errors.New("couldn't draw available port number, try again") + } + return emptyPort, nil +} + +func getNodePort(svc *corev1.Service) int32 { + for _, port := range svc.Spec.Ports { + if port.Name == dockerRegistryPortName { + return port.NodePort + } + } + return dockerRegistryNodePort +} + +func getService(ctx context.Context, k8sClient client.Client, namespace, name string) (*corev1.Service, error) { + svc := corev1.Service{} + err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &svc) + if client.IgnoreNotFound(err) != nil { + return nil, errors.Wrap(err, fmt.Sprintf("while getting %s service", name)) + } + return &svc, nil +} + +func isDefaultNodePortValue(svc *corev1.Service) bool { + ports := svc.Spec.Ports + for _, port := range ports { + if port.NodePort == dockerRegistryNodePort { + return true + } + } + return false +} + +func getAllNodePortServices(ctx context.Context, k8sClient client.Client) (*corev1.ServiceList, error) { + svcs := corev1.ServiceList{} + err := k8sClient.List(ctx, &svcs, &client.ListOptions{Namespace: allNamespaces}) + if err != nil { + return nil, errors.Wrap(err, "while getting list of all services") + } + nodePortSvcs := &corev1.ServiceList{} + for _, svc := range svcs.Items { + if svc.Spec.Type == corev1.ServiceTypeNodePort { + nodePortSvcs.Items = append(nodePortSvcs.Items, svc) + } + if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { + for _, port := range svc.Spec.Ports { + if port.NodePort != 0 { + nodePortSvcs.Items = append(nodePortSvcs.Items, svc) + break + } + } + } + } + return nodePortSvcs, nil +} + +func possibleConflict(svcs *corev1.ServiceList) bool { + for _, svc := range svcs.Items { + ports := svc.Spec.Ports + for _, port := range ports { + if port.NodePort == dockerRegistryNodePort { + return true + } + } + } + return false +} + +var _ nodePortFinder = RandomNodePort + +func RandomNodePort() int32 { + number := rand.Int31n(maxNodePort - minNodePort) + return minNodePort + number +} diff --git a/components/operator/internal/registry/node_port_test.go b/components/operator/internal/registry/node_port_test.go new file mode 100644 index 00000000..9965b8d3 --- /dev/null +++ b/components/operator/internal/registry/node_port_test.go @@ -0,0 +1,144 @@ +package registry + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const nonConflictPort int32 = 32238 + +const kymaNamespace = "kyma-system" + +type assertFn func(t *testing.T, overrides map[string]interface{}) + +func TestNodePortAction(t *testing.T) { + testCases := map[string]struct { + givenService *corev1.Service + expectedPort int32 + assertFn assertFn + }{ + "Return default port when nodePort installed on default port": { + givenService: fixtureServiceNodePort(dockerRegistryService, kymaNamespace, dockerRegistryNodePort), + expectedPort: dockerRegistryNodePort, + }, + "Return current port when nodePort service installed on different port": { + givenService: fixtureServiceNodePort(dockerRegistryService, kymaNamespace, nonConflictPort), + expectedPort: nonConflictPort, + }, + "Return default port when nodePort not installed, without port conflict": { + expectedPort: dockerRegistryNodePort, + }, + "Generate new port when nodePort not installed, with port conflict": { + givenService: fixtureServiceNodePort("conflicting-svc", kymaNamespace, dockerRegistryNodePort), + expectedPort: nonConflictPort, + }, + "Return default port when service is ClusterIP before upgrade without port conflict": { + givenService: fixtureServiceClusterIP(dockerRegistryService, kymaNamespace), + expectedPort: dockerRegistryNodePort, + }, + "Generate new port when cluster has NodePort service in different namespace with port conflict": { + givenService: fixtureServiceNodePort(dockerRegistryService, "different-ns", dockerRegistryNodePort), + expectedPort: nonConflictPort, + }, + "Generate new port when cluster has LoadBalancer service in different namespace with port conflict": { + givenService: fixtureLoadBalancer(), + expectedPort: nonConflictPort, + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + //GIVEN + ctx := context.TODO() + k8sClient := fake.NewClientBuilder(). + WithRuntimeObjects(fixtureServices()...).
+ Build() + resolver := NewNodePortResolver(fixedNodePort(nonConflictPort)) + if testCase.givenService != nil { + err := k8sClient.Create(ctx, testCase.givenService, &client.CreateOptions{}) + require.NoError(t, err) + } + + //WHEN + port, err := resolver.ResolveDockerRegistryNodePortFn(ctx, k8sClient, kymaNamespace) + + //THEN + require.NoError(t, err) + require.Equal(t, testCase.expectedPort, port) + }) + } +} + +func fixtureServiceNodePort(name, namespace string, nodePort int32) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + {Name: dockerRegistryPortName, NodePort: nodePort}}, + }, + } +} + +func fixtureServiceClusterIP(name, namespace string) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + {Name: dockerRegistryPortName, Port: 5000}}, + }, + } +} + +func fixtureServices() []runtime.Object { + l := []runtime.Object{ + fixtureServiceNodePort("other-node-port", kymaNamespace, dockerRegistryNodePort-1), + fixtureServiceNodePort("many-ports", kymaNamespace, dockerRegistryNodePort+2), + } + return l +} + +func fixedNodePort(expectedPort int32) func() int32 { + return func() int32 { + return expectedPort + } +} + +func fixtureLoadBalancer() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio-ingressgateway", + Namespace: "istio-system", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + Ports: []corev1.ServicePort{ + { + NodePort: dockerRegistryNodePort, + Name: "http2", + }, + { + NodePort: 30857, + Name: "https", + }, + }, + }, + Status: corev1.ServiceStatus{}, + } +} diff --git a/components/operator/internal/registry/secret.go b/components/operator/internal/registry/secret.go new file mode 100644 index 00000000..63b44319 --- /dev/null +++ b/components/operator/internal/registry/secret.go @@ -0,0 +1,61 @@ +package registry + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + SecretName = "internal-dockerregistry-config" + LabelConfigKey = "dockerregistry.kyma-project.io/config" + LabelConfigVal = "credentials" + IsInternalKey = "isInternal" + DeploymentName = "internal-docker-registry" + HttpEnvKey = "REGISTRY_HTTP_SECRET" +) + +func GetDockerRegistryInternalRegistrySecret(ctx context.Context, c client.Client, namespace string) (*corev1.Secret, error) { + secret := corev1.Secret{} + key := client.ObjectKey{ + Namespace: namespace, + Name: SecretName, + } + err := c.Get(ctx, key, &secret) + if err != nil { + return nil, client.IgnoreNotFound(err) + } + + if val, ok := secret.GetLabels()[LabelConfigKey]; !ok || val != LabelConfigVal { + return nil, nil + } + + if val := string(secret.Data[IsInternalKey]); val != "true" { + return nil, nil + } + + return &secret, nil +} + +func GetRegistryHTTPSecretEnvValue(ctx context.Context, c client.Client, namespace string) (string, error) { + deployment := appsv1.Deployment{} + key := client.ObjectKey{ + Namespace: namespace, + Name: DeploymentName, + } + err := c.Get(ctx, key, &deployment) + if err != nil { + return "", client.IgnoreNotFound(err) + } + + envs := 
deployment.Spec.Template.Spec.Containers[0].Env + for _, v := range envs { + if v.Name == HttpEnvKey && v.Value != "" { + return v.Value, nil + } + } + + return "", nil +} diff --git a/components/operator/internal/resource/resource.go b/components/operator/internal/resource/resource.go new file mode 100644 index 00000000..afb88f91 --- /dev/null +++ b/components/operator/internal/resource/resource.go @@ -0,0 +1,107 @@ +package resource + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apilabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +//go:generate mockery --name=Client --output=automock --outpkg=automock --case=underscore +type Client interface { + Create(ctx context.Context, object Object) error + CreateWithReference(ctx context.Context, parent Object, object Object) error + Update(ctx context.Context, object Object) error + Get(ctx context.Context, key ctrlclient.ObjectKey, object Object) error + ListByLabel(ctx context.Context, namespace string, labels map[string]string, object ctrlclient.ObjectList) error + DeleteAllBySelector(ctx context.Context, resourceType Object, namespace string, selector apilabels.Selector) error + Delete(ctx context.Context, resourceType Object) error + Status() ctrlclient.StatusWriter +} + +//go:generate mockery --name=K8sClient --output=automock --outpkg=automock --case=underscore +type K8sClient interface { + Create(context.Context, ctrlclient.Object, ...ctrlclient.CreateOption) error + Update(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.UpdateOption) error + Get(ctx context.Context, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error + List(context.Context, ctrlclient.ObjectList, ...ctrlclient.ListOption) error + DeleteAllOf(context.Context, ctrlclient.Object, ...ctrlclient.DeleteAllOfOption) error + Status() ctrlclient.StatusWriter + Delete(ctx context.Context, obj ctrlclient.Object, opts ...ctrlclient.DeleteOption) error +} + +type Object interface { + runtime.Object + metav1.Object +} + +var _ Client = &client{} + +type client struct { + k8sClient K8sClient + schema *runtime.Scheme +} + +func (c *client) Delete(ctx context.Context, obj Object) error { + propagationPolicy := metav1.DeletePropagationBackground + return c.k8sClient.Delete(ctx, obj, &ctrlclient.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }) +} + +func New(k8sClient K8sClient, schema *runtime.Scheme) Client { + return &client{ + k8sClient: k8sClient, + schema: schema, + } +} + +func (c *client) Create(ctx context.Context, object Object) error { + return c.CreateWithReference(ctx, nil, object) +} + +func (c *client) CreateWithReference(ctx context.Context, parent, object Object) error { + if parent != nil { + if err := controllerutil.SetControllerReference(parent, object, c.schema); err != nil { + return err + } + } + + return c.k8sClient.Create(ctx, object) +} + +func (c *client) Update(ctx context.Context, object Object) error { + return c.k8sClient.Update(ctx, object) +} + +func (c *client) Get(ctx context.Context, key ctrlclient.ObjectKey, object Object) error { + return c.k8sClient.Get(ctx, key, object) +} + +func (c *client) ListByLabel(ctx context.Context, namespace string, labels map[string]string, list ctrlclient.ObjectList) error { + return c.k8sClient.List(ctx, list, &ctrlclient.ListOptions{ + LabelSelector: 
apilabels.SelectorFromSet(labels), + Namespace: namespace, + }) +} + +func (c *client) DeleteAllBySelector(ctx context.Context, resourceType Object, namespace string, selector apilabels.Selector) error { + propagationPolicy := metav1.DeletePropagationBackground + + return c.k8sClient.DeleteAllOf(ctx, resourceType, &ctrlclient.DeleteAllOfOptions{ + ListOptions: ctrlclient.ListOptions{ + LabelSelector: selector, + Namespace: namespace, + }, + DeleteOptions: ctrlclient.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }, + }) +} + +func (c *client) Status() ctrlclient.StatusWriter { + return c.k8sClient.Status() +} diff --git a/components/operator/internal/state/add_finalizer.go b/components/operator/internal/state/add_finalizer.go new file mode 100644 index 00000000..9ed07295 --- /dev/null +++ b/components/operator/internal/state/add_finalizer.go @@ -0,0 +1,32 @@ +package state + +import ( + "context" + + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func sFnAddFinalizer(ctx context.Context, r *reconciler, s *systemState) (stateFn, *controllerruntime.Result, error) { + instanceIsBeingDeleted := !s.instance.GetDeletionTimestamp().IsZero() + instanceHasFinalizer := controllerutil.ContainsFinalizer(&s.instance, r.finalizer) + if !instanceHasFinalizer { + // in case instance has no finalizer and instance is being deleted - end reconciliation + if instanceIsBeingDeleted { + // stop state machine + return stop() + } + + if err := addFinalizer(ctx, r, s); err != nil { + // stop state machine with potential error + return stopWithEventualError(err) + } + } + return nextState(sFnInitialize) +} + +func addFinalizer(ctx context.Context, r *reconciler, s *systemState) error { + // in case instance does not have finalizer - add it and update instance + controllerutil.AddFinalizer(&s.instance, r.finalizer) + return updateDockerRegistryWithoutStatus(ctx, r, s) +} diff --git a/components/operator/internal/state/add_finalizer_test.go b/components/operator/internal/state/add_finalizer_test.go new file mode 100644 index 00000000..9771e856 --- /dev/null +++ b/components/operator/internal/state/add_finalizer_test.go @@ -0,0 +1,84 @@ +package state + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "testing" +) + +func Test_sFnAddFinalizer(t *testing.T) { + t.Run("set finalizer", func(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, v1alpha1.AddToScheme(scheme)) + + dockerRegistry := v1alpha1.DockerRegistry{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-name", + Namespace: "test-namespace", + ResourceVersion: "123", + }, + } + s := &systemState{ + instance: dockerRegistry, + } + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(&dockerRegistry). 
+ Build(), + }, + } + + // set finalizer + next, result, err := sFnAddFinalizer(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnInitialize, next) + + // check finalizer in systemState + require.Contains(t, s.instance.GetFinalizers(), r.cfg.finalizer) + + // check finalizer in k8s + obj := v1alpha1.DockerRegistry{} + err = r.k8s.client.Get(context.Background(), + client.ObjectKey{ + Namespace: dockerRegistry.Namespace, + Name: dockerRegistry.Name, + }, + &obj) + require.NoError(t, err) + require.Contains(t, obj.GetFinalizers(), r.cfg.finalizer) + }) + + t.Run("stop when no finalizer and instance is being deleted", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + } + + metaTimeNow := v1.Now() + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: v1.ObjectMeta{ + DeletionTimestamp: &metaTimeNow, + }, + }, + } + + // stop + next, result, err := sFnAddFinalizer(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + }) +} diff --git a/components/operator/internal/state/apply.go b/components/operator/internal/state/apply.go new file mode 100644 index 00000000..12a5fb96 --- /dev/null +++ b/components/operator/internal/state/apply.go @@ -0,0 +1,37 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// run dockerregistry chart installation +func sFnApplyResources(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // set condition Installed if it does not exist + if !s.instance.IsCondition(v1alpha1.ConditionTypeInstalled) { + s.setState(v1alpha1.StateProcessing) + s.instance.UpdateConditionUnknown(v1alpha1.ConditionTypeInstalled, v1alpha1.ConditionReasonInstallation, + "Installing for configuration") + } + + // install component + err := chart.Install(s.chartConfig, s.flagsBuilder.Build()) + if err != nil { + r.log.Warnf("error while installing resource %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstallationErr, + err, + ) + return stopWithEventualError(err) + } + + // switch state verify + return nextState(sFnVerifyResources) +} diff --git a/components/operator/internal/state/apply_test.go b/components/operator/internal/state/apply_test.go new file mode 100644 index 00000000..016e35b3 --- /dev/null +++ b/components/operator/internal/state/apply_test.go @@ -0,0 +1,104 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func Test_buildSFnApplyResources(t *testing.T) { + t.Run("switch state and add condition when condition is missing", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{}, + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + 
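+				// CacheKey ties the cached manifest to this CR's identity; Release
+				// (below) points the Helm release at the same name/namespace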
Release: chart.Release{
+					Name:      testInstalledDockerRegistry.GetName(),
+					Namespace: testInstalledDockerRegistry.GetNamespace(),
+				},
+			},
+			flagsBuilder: chart.NewFlagsBuilder(),
+		}
+
+		next, result, err := sFnApplyResources(context.Background(), nil, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		requireEqualFunc(t, sFnVerifyResources, next)
+
+		status := s.instance.Status
+		require.Equal(t, v1alpha1.StateProcessing, status.State)
+		requireContainsCondition(t, status,
+			v1alpha1.ConditionTypeInstalled,
+			metav1.ConditionUnknown,
+			v1alpha1.ConditionReasonInstallation,
+			"Installing for configuration",
+		)
+	})
+
+	t.Run("apply resources", func(t *testing.T) {
+		s := &systemState{
+			instance: *testInstalledDockerRegistry.DeepCopy(),
+			chartConfig: &chart.Config{
+				Cache: fixEmptyManifestCache(),
+				CacheKey: types.NamespacedName{
+					Name:      testInstalledDockerRegistry.GetName(),
+					Namespace: testInstalledDockerRegistry.GetNamespace(),
+				},
+				Release: chart.Release{
+					Name:      testInstalledDockerRegistry.GetName(),
+					Namespace: testInstalledDockerRegistry.GetNamespace(),
+				},
+			},
+			flagsBuilder: chart.NewFlagsBuilder(),
+		}
+		r := &reconciler{}
+
+		// run the installation process and return the verifying state
+		next, result, err := sFnApplyResources(context.Background(), r, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		requireEqualFunc(t, sFnVerifyResources, next)
+	})
+
+	t.Run("install chart error", func(t *testing.T) {
+		s := &systemState{
+			instance: *testInstalledDockerRegistry.DeepCopy(),
+			chartConfig: &chart.Config{
+				Cache: fixManifestCache("\t"),
+				CacheKey: types.NamespacedName{
+					Name:      testInstalledDockerRegistry.GetName(),
+					Namespace: testInstalledDockerRegistry.GetNamespace(),
+				},
+			},
+			flagsBuilder: chart.NewFlagsBuilder(),
+		}
+		r := &reconciler{
+			log: zap.NewNop().Sugar(),
+		}
+
+		// handle the error and return with an updated condition
+		next, result, err := sFnApplyResources(context.Background(), r, s)
+		require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token")
+		require.Nil(t, result)
+		require.Nil(t, next)
+
+		status := s.instance.Status
+		require.Equal(t, v1alpha1.StateError, status.State)
+		requireContainsCondition(t, status,
+			v1alpha1.ConditionTypeInstalled,
+			metav1.ConditionFalse,
+			v1alpha1.ConditionReasonInstallationErr,
+			"could not parse chart manifest: yaml: found character that cannot start any token",
+		)
+	})
+}
diff --git a/components/operator/internal/state/controller_configuration.go b/components/operator/internal/state/controller_configuration.go
new file mode 100644
index 00000000..b6170ede
--- /dev/null
+++ b/components/operator/internal/state/controller_configuration.go
@@ -0,0 +1,45 @@
+package state
+
+import (
+	"context"
+	"github.com/kyma-project/docker-registry/components/operator/internal/registry"
+
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	controllerruntime "sigs.k8s.io/controller-runtime"
+)
+
+func sFnControllerConfiguration(_ context.Context, r *reconciler, s *systemState) (stateFn, *controllerruntime.Result, error) {
+	err := updateControllerConfigurationStatus(r, &s.instance)
+	if err != nil {
+		return stopWithEventualError(err)
+	}
+
+	configureControllerConfigurationFlags(s)
+
+	s.setState(v1alpha1.StateProcessing)
+	s.instance.UpdateConditionTrue(
+		v1alpha1.ConditionTypeConfigured,
+		v1alpha1.ConditionReasonConfigured,
+		"Configuration ready",
+	)
+
+	return nextState(sFnApplyResources)
+}
+
+func updateControllerConfigurationStatus(r
*reconciler, instance *v1alpha1.DockerRegistry) error { + spec := instance.Spec + fields := fieldsToUpdate{ + {spec.HealthzLivenessTimeout, &instance.Status.HealthzLivenessTimeout, "Duration of health check", ""}, + {registry.SecretName, &instance.Status.SecretName, "Name of secret with registry access data", ""}, + } + + updateStatusFields(r.k8s, instance, fields) + return nil +} + +func configureControllerConfigurationFlags(s *systemState) { + s.flagsBuilder. + WithControllerConfiguration( + s.instance.Status.HealthzLivenessTimeout, + ) +} diff --git a/components/operator/internal/state/controller_configuration_test.go b/components/operator/internal/state/controller_configuration_test.go new file mode 100644 index 00000000..431231b2 --- /dev/null +++ b/components/operator/internal/state/controller_configuration_test.go @@ -0,0 +1,110 @@ +package state + +import ( + "context" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + healthzLivenessTimeoutTest = "test-healthz-liveness-timeout" +) + +func Test_sFnControllerConfiguration(t *testing.T) { + configurationReadyMsg := "Configuration ready" + + t.Run("update status additional configuration overrides", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Spec: v1alpha1.DockerRegistrySpec{ + HealthzLivenessTimeout: healthzLivenessTimeoutTest, + }, + }, + flagsBuilder: chart.NewFlagsBuilder(), + } + + c := fake.NewClientBuilder().Build() + eventRecorder := record.NewFakeRecorder(10) + r := &reconciler{log: zap.NewNop().Sugar(), k8s: k8s{client: c, EventRecorder: eventRecorder}} + next, result, err := sFnControllerConfiguration(context.TODO(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnApplyResources, next) + + status := s.instance.Status + require.Equal(t, healthzLivenessTimeoutTest, status.HealthzLivenessTimeout) + require.Equal(t, registry.SecretName, status.SecretName) + + require.Equal(t, v1alpha1.StateProcessing, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionTrue, + v1alpha1.ConditionReasonConfigured, + configurationReadyMsg, + ) + + expectedEvents := []string{ + "Normal Configuration Duration of health check set from '' to 'test-healthz-liveness-timeout'", + } + + for _, expectedEvent := range expectedEvents { + require.Equal(t, expectedEvent, <-eventRecorder.Events) + } + }) + + t.Run("reconcile from configurationError", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: string(v1alpha1.ConditionTypeConfigured), + Status: metav1.ConditionFalse, + Reason: string(v1alpha1.ConditionReasonConfigurationErr), + }, + { + Type: string(v1alpha1.ConditionTypeInstalled), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonInstallation), + }, + }, + State: v1alpha1.StateError, + }, + }, + statusSnapshot: v1alpha1.DockerRegistryStatus{}, + flagsBuilder: chart.NewFlagsBuilder(), + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "boo", + }, 
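+			// an arbitrary secret; it only seeds the fake client and is not read
+			// by the configuration flow under test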
+ } + r := &reconciler{ + log: zap.NewNop().Sugar(), + k8s: k8s{ + client: fake.NewClientBuilder().WithObjects(secret).Build(), + EventRecorder: record.NewFakeRecorder(2), + }, + } + + next, result, err := sFnControllerConfiguration(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnApplyResources, next) + requireContainsCondition(t, s.instance.Status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionTrue, + v1alpha1.ConditionReasonConfigured, + configurationReadyMsg) + require.Equal(t, v1alpha1.StateProcessing, s.instance.Status.State) + }) +} diff --git a/components/operator/internal/state/delete.go b/components/operator/internal/state/delete.go new file mode 100644 index 00000000..09332081 --- /dev/null +++ b/components/operator/internal/state/delete.go @@ -0,0 +1,99 @@ +package state + +import ( + "context" + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// delete dockerregistry based on previously installed resources +func sFnDeleteResources(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + s.setState(v1alpha1.StateDeleting) + s.instance.UpdateConditionUnknown( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletion, + "Uninstalling", + ) + + return nextState(sFnSafeDeletionState) +} + +func sFnSafeDeletionState(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + if err := chart.CheckCRDOrphanResources(s.chartConfig); err != nil { + // stop state machine with a warning and requeue reconciliation in 1min + // warning state indicates that user intervention would fix it. It's not reconciliation error. + s.setState(v1alpha1.StateWarning) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeDeleted, + v1alpha1.ConditionReasonDeletionErr, + err, + ) + return stopWithEventualError(err) + } + + return deleteResourcesWithFilter(r, s) +} + +func deleteResourcesWithFilter(r *reconciler, s *systemState, filterFuncs ...chart.FilterFunc) (stateFn, *ctrl.Result, error) { + err, done := chart.UninstallSecrets(s.chartConfig, filterFuncs...) 
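+	// note the unconventional return order: chart.UninstallSecrets reports the
+	// error first and a completion flag second; done=false means some secrets
+	// still wait for their finalizers, so the flow below requeues instead of
+	// proceeding with the uninstall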
+	if err != nil {
+		return uninstallSecretsError(r, s, err)
+	}
+	if !done {
+		return awaitingSecretsRemoval(s)
+	}
+
+	if err := chart.Uninstall(s.chartConfig, filterFuncs...); err != nil {
+		return uninstallResourcesError(r, s, err)
+	}
+
+	s.setState(v1alpha1.StateDeleting)
+	s.instance.UpdateConditionTrue(
+		v1alpha1.ConditionTypeDeleted,
+		v1alpha1.ConditionReasonDeleted,
+		"DockerRegistry module deleted",
+	)
+
+	// if resources are ready to be deleted, remove finalizer
+	return nextState(sFnRemoveFinalizer)
+}
+
+func uninstallResourcesError(r *reconciler, s *systemState, err error) (stateFn, *ctrl.Result, error) {
+	r.log.Warnf("error while uninstalling resource %s: %s",
+		client.ObjectKeyFromObject(&s.instance), err.Error())
+	s.setState(v1alpha1.StateError)
+	s.instance.UpdateConditionFalse(
+		v1alpha1.ConditionTypeDeleted,
+		v1alpha1.ConditionReasonDeletionErr,
+		err,
+	)
+	return stopWithEventualError(err)
+}
+
+func awaitingSecretsRemoval(s *systemState) (stateFn, *ctrl.Result, error) {
+	s.setState(v1alpha1.StateDeleting)
+	s.instance.UpdateConditionTrue(
+		v1alpha1.ConditionTypeDeleted,
+		v1alpha1.ConditionReasonDeletion,
+		"Deleting secrets",
+	)
+
+	// wait one second until the controller manager removes finalizers from the secrets
+	return requeueAfter(time.Second)
+}
+
+func uninstallSecretsError(r *reconciler, s *systemState, err error) (stateFn, *ctrl.Result, error) {
+	r.log.Warnf("error while uninstalling secrets %s: %s",
+		client.ObjectKeyFromObject(&s.instance), err.Error())
+	s.setState(v1alpha1.StateError)
+	s.instance.UpdateConditionFalse(
+		v1alpha1.ConditionTypeDeleted,
+		v1alpha1.ConditionReasonDeletionErr,
+		err,
+	)
+	return stopWithEventualError(err)
+}
diff --git a/components/operator/internal/state/delete_test.go b/components/operator/internal/state/delete_test.go
new file mode 100644
index 00000000..8f0fb16d
--- /dev/null
+++ b/components/operator/internal/state/delete_test.go
@@ -0,0 +1,121 @@
+package state
+
+import (
+	"context"
+	"testing"
+
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	"github.com/kyma-project/docker-registry/components/operator/internal/chart"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+var (
+	testDeletingDockerRegistry = func() v1alpha1.DockerRegistry {
+		dockerRegistry := testInstalledDockerRegistry
+		dockerRegistry.Status.State = v1alpha1.StateDeleting
+		dockerRegistry.Status.Conditions = []metav1.Condition{
+			{
+				Type:   string(v1alpha1.ConditionTypeDeleted),
+				Reason: string(v1alpha1.ConditionReasonDeletion),
+				Status: metav1.ConditionUnknown,
+			},
+		}
+		return dockerRegistry
+	}()
+)
+
+func Test_sFnDeleteResources(t *testing.T) {
+	ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace"}}
+
+	t.Run("update condition", func(t *testing.T) {
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{},
+		}
+
+		next, result, err := sFnDeleteResources(context.Background(), nil, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		requireEqualFunc(t, sFnSafeDeletionState, next)
+
+		status := s.instance.Status
+		require.Equal(t, v1alpha1.StateDeleting, status.State)
+		requireContainsCondition(t, status,
+			v1alpha1.ConditionTypeDeleted,
+			metav1.ConditionUnknown,
+			v1alpha1.ConditionReasonDeletion,
+			"Uninstalling",
+		)
+	})
+
+	t.Run("deletion error while checking orphan resources",
func(t *testing.T) { + s := &systemState{ + instance: *testDeletingDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixManifestCache("\t"), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + next, result, err := sFnSafeDeletionState(context.TODO(), r, s) + require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token") + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeDeleted, + metav1.ConditionFalse, + v1alpha1.ConditionReasonDeletionErr, + "could not parse chart manifest: yaml: found character that cannot start any token", + ) + }) + + t.Run("deletion", func(t *testing.T) { + s := &systemState{ + instance: *testDeletingDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testDeletingDockerRegistry.GetName(), + Namespace: testDeletingDockerRegistry.GetNamespace(), + }, + Cluster: chart.Cluster{ + Client: fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(&ns). + Build(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + next, result, err := sFnSafeDeletionState(context.TODO(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnRemoveFinalizer, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateDeleting, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeDeleted, + metav1.ConditionTrue, + v1alpha1.ConditionReasonDeleted, + "DockerRegistry module deleted", + ) + }) +} diff --git a/components/operator/internal/state/emit_event.go b/components/operator/internal/state/emit_event.go new file mode 100644 index 00000000..65a1febb --- /dev/null +++ b/components/operator/internal/state/emit_event.go @@ -0,0 +1,41 @@ +package state + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + warningMessagePrefix = "Warning" +) + +func emitEvent(m *reconciler, s *systemState) { + // compare if any condition change + for _, condition := range s.instance.Status.Conditions { + // check if condition exists in memento status + memorizedCondition := meta.FindStatusCondition(s.statusSnapshot.Conditions, condition.Type) + // ignore unchanged conditions + if memorizedCondition != nil && + memorizedCondition.Status == condition.Status && + memorizedCondition.Reason == condition.Reason && + memorizedCondition.Message == condition.Message { + continue + } + m.Event( + &s.instance, + eventType(condition, condition.Message), + condition.Reason, + condition.Message, + ) + } +} + +func eventType(condition metav1.Condition, message string) string { + eventType := "Normal" + if condition.Status == metav1.ConditionFalse || strings.HasPrefix(message, warningMessagePrefix) { + eventType = "Warning" + } + return eventType +} diff --git a/components/operator/internal/state/emit_event_test.go b/components/operator/internal/state/emit_event_test.go new file mode 100644 index 00000000..23b9e3a9 --- /dev/null +++ b/components/operator/internal/state/emit_event_test.go @@ -0,0 +1,94 @@ +package state + +import ( + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + 
"github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" +) + +var ( + testDockerRegistryConditions1 = v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Reason: "test-reason", + Message: "test message 1", + Type: "test-type-1", + }, + { + Status: metav1.ConditionUnknown, + Reason: "test-reason", + Message: "test message 1", + Type: "test-type-2", + }, + }, + }, + } + testDockerRegistryConditions2 = v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Reason: "test-reason", + Message: "test message 2", + Type: "test-type-1", + }, + { + Status: metav1.ConditionTrue, + Reason: "test-reason", + Message: "test message 2", + Type: "test-type-2", + }, + }, + }, + } +) + +func Test_emitEvent(t *testing.T) { + t.Run("don't emit event", func(t *testing.T) { + eventRecorder := record.NewFakeRecorder(5) + s := &systemState{ + instance: *testDockerRegistryConditions1.DeepCopy(), + statusSnapshot: *testDockerRegistryConditions1.Status.DeepCopy(), + } + r := &reconciler{ + k8s: k8s{ + EventRecorder: eventRecorder, + }, + } + + emitEvent(r, s) + + // check conditions, don't emit event + require.Len(t, eventRecorder.Events, 0) + }) + + t.Run("emit events", func(t *testing.T) { + eventRecorder := record.NewFakeRecorder(5) + s := &systemState{ + instance: *testDockerRegistryConditions2.DeepCopy(), + statusSnapshot: *testDockerRegistryConditions1.Status.DeepCopy(), + } + r := &reconciler{ + k8s: k8s{ + EventRecorder: eventRecorder, + }, + } + + // build emitEventFunc + emitEvent(r, s) + + // check conditions, don't emit event + require.Len(t, eventRecorder.Events, 2) + + expectedEvents := []string{"Warning test-reason test message 2", "Normal test-reason test message 2"} + close(eventRecorder.Events) + for v := range eventRecorder.Events { + require.Contains(t, expectedEvents, v) + } + }) +} diff --git a/components/operator/internal/state/fsm.go b/components/operator/internal/state/fsm.go new file mode 100644 index 00000000..a5b636ba --- /dev/null +++ b/components/operator/internal/state/fsm.go @@ -0,0 +1,143 @@ +package state + +import ( + "context" + "fmt" + "reflect" + "runtime" + "strings" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/warning" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + defaultResult = ctrl.Result{} + secretCacheKey = types.NamespacedName{ + Name: "dockerregistry-manifest-cache", + Namespace: "kyma-system", + } +) + +type stateFn func(context.Context, *reconciler, *systemState) (stateFn, *ctrl.Result, error) + +type cfg struct { + finalizer string + chartPath string + managerPodUID string +} + +type systemState struct { + instance v1alpha1.DockerRegistry + statusSnapshot v1alpha1.DockerRegistryStatus + chartConfig *chart.Config + warningBuilder *warning.Builder + flagsBuilder chart.FlagsBuilder +} + +func (s *systemState) saveStatusSnapshot() { + result := s.instance.Status.DeepCopy() + if result == nil { + result = &v1alpha1.DockerRegistryStatus{} + } + s.statusSnapshot = *result +} + +func (s 
*systemState) setState(state v1alpha1.State) { + s.instance.Status.State = state +} + +func (s *systemState) setServed(served v1alpha1.Served) { + s.instance.Status.Served = served +} + +func chartConfig(ctx context.Context, r *reconciler, namespace string) *chart.Config { + return &chart.Config{ + Ctx: ctx, + Log: r.log, + Cache: r.cache, + CacheKey: secretCacheKey, + ManagerUID: r.managerPodUID, + Cluster: chart.Cluster{ + Client: r.client, + Config: r.config, + }, + Release: chart.Release{ + ChartPath: r.chartPath, + Namespace: namespace, + Name: "dockerregistry", + }, + } +} + +type k8s struct { + client client.Client + config *rest.Config + record.EventRecorder +} + +type reconciler struct { + fn stateFn + log *zap.SugaredLogger + cache chart.ManifestCache + result ctrl.Result + k8s + cfg +} + +func (m *reconciler) stateFnName() string { + fullName := runtime.FuncForPC(reflect.ValueOf(m.fn).Pointer()).Name() + splitFullName := strings.Split(fullName, ".") + + if len(splitFullName) < 3 { + return fullName + } + + shortName := splitFullName[2] + return shortName +} + +func (m *reconciler) Reconcile(ctx context.Context, v v1alpha1.DockerRegistry) (ctrl.Result, error) { + state := systemState{ + instance: v, + warningBuilder: warning.NewBuilder(), + flagsBuilder: chart.NewFlagsBuilder(), + chartConfig: chartConfig(ctx, m, v.Namespace), + } + state.saveStatusSnapshot() + var err error + var result *ctrl.Result +loop: + for m.fn != nil && err == nil { + select { + case <-ctx.Done(): + err = ctx.Err() + break loop + + default: + m.log.Info(fmt.Sprintf("switching state: %s", m.stateFnName())) + m.fn, result, err = m.fn(ctx, m, &state) + if updateErr := updateDockerRegistryStatus(ctx, m, &state); updateErr != nil { + err = updateErr + } + } + } + + if result == nil { + result = &defaultResult + } + + m.log. + With("error", err). + With("result", result). 
+ Info("reconciliation done") + + return *result, err +} diff --git a/components/operator/internal/state/fsm_test.go b/components/operator/internal/state/fsm_test.go new file mode 100644 index 00000000..b05f42fe --- /dev/null +++ b/components/operator/internal/state/fsm_test.go @@ -0,0 +1,145 @@ +package state + +import ( + "context" + "reflect" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testStateFn = func(ctx context.Context, r *reconciler, ss *systemState) (stateFn, *ctrl.Result, error) { + return nil, &testResult, nil + } + + testWrappedStateFn = func(ctx context.Context, r *reconciler, ss *systemState) (stateFn, *ctrl.Result, error) { + return testStateFn, nil, nil + } + + testResult = ctrl.Result{ + Requeue: true, + } + + canceledCtx = func() context.Context { + ctx, done := context.WithCancel(context.Background()) + done() + return ctx + }() +) + +func Test_reconciler_Reconcile(t *testing.T) { + type fields struct { + fn stateFn + log *zap.SugaredLogger + cache chart.ManifestCache + result ctrl.Result + k8s k8s + cfg cfg + } + type args struct { + ctx context.Context + v v1alpha1.DockerRegistry + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + }{ + { + name: "empty fn", + fields: fields{ + log: zap.NewNop().Sugar(), + }, + want: defaultResult, + wantErr: false, + }, + { + name: "with ctx done", + fields: fields{ + log: zap.NewNop().Sugar(), + fn: testStateFn, + }, + args: args{ + ctx: canceledCtx, + }, + want: defaultResult, + wantErr: true, + }, + { + name: "with many fns", + fields: fields{ + log: zap.NewNop().Sugar(), + fn: testWrappedStateFn, + }, + args: args{ + ctx: context.Background(), + }, + want: testResult, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &reconciler{ + fn: tt.fields.fn, + log: tt.fields.log, + cache: tt.fields.cache, + result: tt.fields.result, + k8s: tt.fields.k8s, + cfg: tt.fields.cfg, + } + got, err := m.Reconcile(tt.args.ctx, tt.args.v) + if (err != nil) != tt.wantErr { + t.Errorf("reconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("reconciler.Reconcile() = %v, want %v", got, tt.want) + } + }) + } + + t.Run("take status snapshot", func(t *testing.T) { + fn := func(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // check status + require.Equal(t, s.instance.Status, s.statusSnapshot) + return nil, nil, nil + } + r := &reconciler{ + fn: fn, + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + log: zap.NewNop().Sugar(), + } + dockerRegistry := v1alpha1.DockerRegistry{ + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: "test-type", + Status: "test-status", + Reason: "test-reason", + Message: "test-message", + ObservedGeneration: 1, + LastTransitionTime: metav1.Now(), + }, + }, + State: v1alpha1.StateError, + }, + } + _, err := r.Reconcile(context.Background(), dockerRegistry) + require.NoError(t, err) + }) +} diff --git a/components/operator/internal/state/initialize.go 
b/components/operator/internal/state/initialize.go new file mode 100644 index 00000000..afd12a61 --- /dev/null +++ b/components/operator/internal/state/initialize.go @@ -0,0 +1,18 @@ +package state + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" +) + +// choose right scenario to start (installation/deletion) +func sFnInitialize(_ context.Context, _ *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + // in case instance is being deleted and has finalizer - delete all resources + instanceIsBeingDeleted := !s.instance.GetDeletionTimestamp().IsZero() + if instanceIsBeingDeleted { + return nextState(sFnDeleteResources) + } + + return nextState(sFnRegistryConfiguration) +} diff --git a/components/operator/internal/state/initialize_test.go b/components/operator/internal/state/initialize_test.go new file mode 100644 index 00000000..e6fd0e25 --- /dev/null +++ b/components/operator/internal/state/initialize_test.go @@ -0,0 +1,67 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnInitialize(t *testing.T) { + t.Run("setup and return next step sFnRegistryConfiguration", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + r.cfg.finalizer, + }, + }, + }, + } + + // setup and return buildSFnPrerequisites + next, result, err := sFnInitialize(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnRegistryConfiguration, next) + }) + + t.Run("setup and return next step sFnDeleteResources", func(t *testing.T) { + r := &reconciler{ + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + }, + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + metaTime := metav1.Now() + s := &systemState{ + instance: v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + r.cfg.finalizer, + }, + DeletionTimestamp: &metaTime, + }, + }, + } + + // setup and return buildSFnDeleteResources + next, result, err := sFnInitialize(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnDeleteResources, next) + }) +} diff --git a/components/operator/internal/state/new.go b/components/operator/internal/state/new.go new file mode 100644 index 00000000..54e00edf --- /dev/null +++ b/components/operator/internal/state/new.go @@ -0,0 +1,36 @@ +package state + +import ( + "context" + "os" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "go.uber.org/zap" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type StateReconciler interface { + Reconcile(ctx context.Context, v v1alpha1.DockerRegistry) (ctrl.Result, error) +} + +func NewMachine(client client.Client, config *rest.Config, recorder record.EventRecorder, log *zap.SugaredLogger, cache chart.ManifestCache, chartPath string) StateReconciler { + return &reconciler{ + fn: sFnServedFilter, + cache: cache, + log: log, + cfg: cfg{ + finalizer: v1alpha1.Finalizer, + chartPath: chartPath, + 
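+			// the manager pod UID is read from the environment below and passed
+			// through to chart.Config as ManagerUID; presumably (an assumption,
+			// the source only forwards the value) it ties cached manifests to the
+			// current manager instance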
managerPodUID: os.Getenv("DOCKERREGISTRY_MANAGER_UID"),
+		},
+		k8s: k8s{
+			client:        client,
+			config:        config,
+			EventRecorder: recorder,
+		},
+	}
+}
diff --git a/components/operator/internal/state/registry.go b/components/operator/internal/state/registry.go
new file mode 100644
index 00000000..c39d56c6
--- /dev/null
+++ b/components/operator/internal/state/registry.go
@@ -0,0 +1,66 @@
+package state
+
+import (
+	"context"
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	"github.com/kyma-project/docker-registry/components/operator/internal/registry"
+	"github.com/pkg/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+func sFnRegistryConfiguration(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) {
+	s.setState(v1alpha1.StateProcessing)
+	// set up status.dockerRegistry and collect possible warnings
+	err := configureRegistry(ctx, r, s)
+	if err != nil {
+		s.setState(v1alpha1.StateError)
+		s.instance.UpdateConditionFalse(
+			v1alpha1.ConditionTypeConfigured,
+			v1alpha1.ConditionReasonConfigurationErr,
+			err,
+		)
+		return stopWithEventualError(err)
+	}
+
+	return nextState(sFnControllerConfiguration)
+}
+
+func configureRegistry(ctx context.Context, r *reconciler, s *systemState) error {
+	err := setInternalRegistryConfig(ctx, r, s)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func setInternalRegistryConfig(ctx context.Context, r *reconciler, s *systemState) error {
+	existingIntRegSecret, err := registry.GetDockerRegistryInternalRegistrySecret(ctx, r.client, s.instance.Namespace)
+	if err != nil {
+		return errors.Wrap(err, "while fetching existing internal docker registry secret")
+	}
+	if existingIntRegSecret != nil {
+		r.log.Debugf("reusing existing credentials for internal docker registry to avoid a docker registry rollout")
+		registryHttpSecretEnvValue, getErr := registry.GetRegistryHTTPSecretEnvValue(ctx, r.client, s.instance.Namespace)
+		if getErr != nil {
+			return errors.Wrap(getErr, "while reading env value registryHttpSecret from internal docker registry deployment")
+		}
+		s.flagsBuilder.
+			WithRegistryCredentials(
+				string(existingIntRegSecret.Data["username"]),
+				string(existingIntRegSecret.Data["password"]),
+			).
+ WithRegistryHttpSecret( + registryHttpSecretEnvValue, + ) + } + + resolver := registry.NewNodePortResolver(registry.RandomNodePort) + nodePort, err := resolver.ResolveDockerRegistryNodePortFn(ctx, r.client, s.instance.Namespace) + if err != nil { + return errors.Wrap(err, "while resolving registry node port") + } + r.log.Debugf("docker registry node port: %d", nodePort) + s.flagsBuilder.WithNodePort(int64(nodePort)) + return nil +} diff --git a/components/operator/internal/state/registry_test.go b/components/operator/internal/state/registry_test.go new file mode 100644 index 00000000..514dae5f --- /dev/null +++ b/components/operator/internal/state/registry_test.go @@ -0,0 +1,39 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnRegistryConfiguration(t *testing.T) { + t.Run("internal registry and update", func(t *testing.T) { + s := &systemState{ + instance: v1alpha1.DockerRegistry{}, + statusSnapshot: v1alpha1.DockerRegistryStatus{}, + flagsBuilder: chart.NewFlagsBuilder(), + } + r := &reconciler{ + k8s: k8s{client: fake.NewClientBuilder().Build()}, + log: zap.NewNop().Sugar(), + } + expectedFlags := map[string]interface{}{ + "global": map[string]interface{}{ + "registryNodePort": int64(32_137), + }, + } + + next, result, err := sFnRegistryConfiguration(context.Background(), r, s) + require.NoError(t, err) + require.Nil(t, result) + requireEqualFunc(t, sFnControllerConfiguration, next) + + require.EqualValues(t, expectedFlags, s.flagsBuilder.Build()) + require.Equal(t, v1alpha1.StateProcessing, s.instance.Status.State) + }) +} diff --git a/components/operator/internal/state/remove_finalizer.go b/components/operator/internal/state/remove_finalizer.go new file mode 100644 index 00000000..e5e9c406 --- /dev/null +++ b/components/operator/internal/state/remove_finalizer.go @@ -0,0 +1,17 @@ +package state + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func sFnRemoveFinalizer(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + if !controllerutil.RemoveFinalizer(&s.instance, r.finalizer) { + return requeue() + } + + err := updateDockerRegistryWithoutStatus(ctx, r, s) + return stopWithEventualError(err) +} diff --git a/components/operator/internal/state/remove_finalizer_test.go b/components/operator/internal/state/remove_finalizer_test.go new file mode 100644 index 00000000..e2da8273 --- /dev/null +++ b/components/operator/internal/state/remove_finalizer_test.go @@ -0,0 +1,68 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_sFnRemoveFinalizer(t *testing.T) { + t.Run("remove finalizer", func(t *testing.T) { + scheme := scheme.Scheme + require.NoError(t, v1alpha1.AddToScheme(scheme)) + instance := v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Finalizers: []string{ + v1alpha1.Finalizer, + }, + }, + } + r := &reconciler{ + cfg: 
cfg{
+				finalizer: v1alpha1.Finalizer,
+			},
+			k8s: k8s{
+				client: fake.NewClientBuilder().
+					WithScheme(scheme).
+					WithObjects(&instance).
+					Build(),
+			},
+		}
+		s := &systemState{
+			instance: instance,
+		}
+
+		// remove finalizer
+		next, result, err := sFnRemoveFinalizer(context.Background(), r, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		require.Nil(t, next)
+	})
+
+	t.Run("requeue when there is no finalizer", func(t *testing.T) {
+		r := &reconciler{
+			cfg: cfg{
+				finalizer: v1alpha1.Finalizer,
+			},
+		}
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{
+				ObjectMeta: metav1.ObjectMeta{},
+			},
+		}
+
+		// remove finalizer
+		next, result, err := sFnRemoveFinalizer(context.Background(), r, s)
+		require.Nil(t, err)
+		require.Equal(t, &ctrl.Result{Requeue: true}, result)
+		require.Nil(t, next)
+	})
+}
diff --git a/components/operator/internal/state/served_filter.go b/components/operator/internal/state/served_filter.go
new file mode 100644
index 00000000..6b74e7fa
--- /dev/null
+++ b/components/operator/internal/state/served_filter.go
@@ -0,0 +1,50 @@
+package state
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+func sFnServedFilter(ctx context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) {
+	if s.instance.IsServedEmpty() {
+		if err := setInitialServed(ctx, r, s); err != nil {
+			return stopWithEventualError(err)
+		}
+	}
+
+	if s.instance.Status.Served == v1alpha1.ServedFalse {
+		return stop()
+	}
+	return nextState(sFnAddFinalizer)
+}
+
+func setInitialServed(ctx context.Context, r *reconciler, s *systemState) error {
+	servedDockerRegistry, err := GetServedDockerRegistry(ctx, r.k8s.client)
+	if err != nil {
+		return err
+	}
+
+	return setServed(servedDockerRegistry, s)
+}
+
+func setServed(servedDockerRegistry *v1alpha1.DockerRegistry, s *systemState) error {
+	if servedDockerRegistry == nil {
+		s.setServed(v1alpha1.ServedTrue)
+		return nil
+	}
+
+	s.setServed(v1alpha1.ServedFalse)
+	s.setState(v1alpha1.StateWarning)
+	err := fmt.Errorf(
+		"Only one instance of DockerRegistry is allowed (current served instance: %s/%s). This DockerRegistry CR is redundant. Remove it to fix the problem.",
+		servedDockerRegistry.GetNamespace(), servedDockerRegistry.GetName())
+	s.instance.UpdateConditionFalse(
+		v1alpha1.ConditionTypeConfigured,
+		v1alpha1.ConditionReasonDuplicated,
+		err,
+	)
+	return err
+}
diff --git a/components/operator/internal/state/served_filter_test.go b/components/operator/internal/state/served_filter_test.go
new file mode 100644
index 00000000..7b9585de
--- /dev/null
+++ b/components/operator/internal/state/served_filter_test.go
@@ -0,0 +1,136 @@
+package state
+
+import (
+	"context"
+	"testing"
+
+	"github.com/kyma-project/docker-registry/components/operator/api/v1alpha1"
+	"github.com/stretchr/testify/require"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	apiruntime "k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func Test_sFnServedFilter(t *testing.T) {
+	t.Run("skip processing when served is false", func(t *testing.T) {
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{
+				Status: v1alpha1.DockerRegistryStatus{
+					Served: v1alpha1.ServedFalse,
+				},
+			},
+		}
+
+		nextFn, result, err := sFnServedFilter(context.TODO(), nil, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		require.Nil(t, nextFn)
+	})
+
+	t.Run("do next step when served is true", func(t *testing.T) {
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{
+				Status: v1alpha1.DockerRegistryStatus{
+					Served: v1alpha1.ServedTrue,
+				},
+			},
+		}
+
+		nextFn, result, err := sFnServedFilter(context.TODO(), nil, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		requireEqualFunc(t, sFnAddFinalizer, nextFn)
+	})
+
+	t.Run("set served value from nil to true when there is no served dockerregistry on cluster", func(t *testing.T) {
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{
+				Status: v1alpha1.DockerRegistryStatus{},
+			},
+		}
+
+		r := &reconciler{
+			k8s: k8s{
+				client: func() client.Client {
+					scheme := apiruntime.NewScheme()
+					require.NoError(t, v1alpha1.AddToScheme(scheme))
+
+					client := fake.NewClientBuilder().
+						WithScheme(scheme).
+						WithObjects(
+							fixServedDockerRegistry("test-1", "default", ""),
+							fixServedDockerRegistry("test-2", "dockerregistry-test", v1alpha1.ServedFalse),
+							fixServedDockerRegistry("test-3", "dockerregistry-test-2", ""),
+							fixServedDockerRegistry("test-4", "default", v1alpha1.ServedFalse),
+						).Build()
+
+					return client
+				}(),
+			},
+		}
+
+		nextFn, result, err := sFnServedFilter(context.TODO(), r, s)
+		require.Nil(t, err)
+		require.Nil(t, result)
+		requireEqualFunc(t, sFnAddFinalizer, nextFn)
+		require.Equal(t, v1alpha1.ServedTrue, s.instance.Status.Served)
+	})
+
+	t.Run("set served value from nil to false and set condition to error when there is at least one served dockerregistry on cluster", func(t *testing.T) {
+		s := &systemState{
+			instance: v1alpha1.DockerRegistry{
+				Status: v1alpha1.DockerRegistryStatus{},
+			},
+		}
+
+		r := &reconciler{
+			k8s: k8s{
+				client: func() client.Client {
+					scheme := apiruntime.NewScheme()
+					require.NoError(t, v1alpha1.AddToScheme(scheme))
+
+					client := fake.NewClientBuilder().
+						WithScheme(scheme).
+ WithObjects( + fixServedDockerRegistry("test-1", "default", v1alpha1.ServedFalse), + fixServedDockerRegistry("test-2", "dockerregistry-test", v1alpha1.ServedTrue), + fixServedDockerRegistry("test-3", "dockerregistry-test-2", ""), + fixServedDockerRegistry("test-4", "default", v1alpha1.ServedFalse), + ).Build() + + return client + }(), + }, + } + + nextFn, result, err := sFnServedFilter(context.TODO(), r, s) + + expectedErrorMessage := "Only one instance of DockerRegistry is allowed (current served instance: dockerregistry-test/test-2). This DockerRegistry CR is redundant. Remove it to fix the problem." + require.EqualError(t, err, expectedErrorMessage) + require.Nil(t, result) + require.Nil(t, nextFn) + require.Equal(t, v1alpha1.ServedFalse, s.instance.Status.Served) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeConfigured, + metav1.ConditionFalse, + v1alpha1.ConditionReasonDuplicated, + expectedErrorMessage, + ) + }) +} + +func fixServedDockerRegistry(name, namespace string, served v1alpha1.Served) *v1alpha1.DockerRegistry { + return &v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: v1alpha1.DockerRegistryStatus{ + Served: served, + }, + } +} diff --git a/components/operator/internal/state/state.go b/components/operator/internal/state/state.go new file mode 100644 index 00000000..fdbf8a99 --- /dev/null +++ b/components/operator/internal/state/state.go @@ -0,0 +1,65 @@ +package state + +import ( + "time" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" +) + +var requeueResult = &ctrl.Result{ + Requeue: true, +} + +func nextState(next stateFn) (stateFn, *ctrl.Result, error) { + return next, nil, nil +} + +func stopWithEventualError(err error) (stateFn, *ctrl.Result, error) { + return nil, nil, err +} + +func stop() (stateFn, *ctrl.Result, error) { + return nil, nil, nil +} + +func requeue() (stateFn, *ctrl.Result, error) { + return nil, requeueResult, nil +} + +func requeueAfter(duration time.Duration) (stateFn, *ctrl.Result, error) { + return nil, &ctrl.Result{ + RequeueAfter: duration, + }, nil +} + +type fieldsToUpdate []struct { + specField string + statusField *string + fieldName string + defaultValue string +} + +func updateStatusFields(eventRecorder record.EventRecorder, instance *v1alpha1.DockerRegistry, fields fieldsToUpdate) { + for _, field := range fields { + // set default value if spec field is empty + if field.specField == "" { + field.specField = field.defaultValue + } + + if field.specField != *field.statusField { + oldStatusValue := *field.statusField + *field.statusField = field.specField + eventRecorder.Eventf( + instance, + "Normal", + string(v1alpha1.ConditionReasonConfiguration), + "%s set from '%s' to '%s'", + field.fieldName, + oldStatusValue, + field.specField, + ) + } + } +} diff --git a/components/operator/internal/state/state_test.go b/components/operator/internal/state/state_test.go new file mode 100644 index 00000000..b957ac67 --- /dev/null +++ b/components/operator/internal/state/state_test.go @@ -0,0 +1,110 @@ +package state + +import ( + "context" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/stretchr/testify/require" + 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var ( + testInstalledDockerRegistry = v1alpha1.DockerRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Status: v1alpha1.DockerRegistryStatus{ + Conditions: []metav1.Condition{ + { + Type: string(v1alpha1.ConditionTypeConfigured), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonConfiguration), + }, + { + Type: string(v1alpha1.ConditionTypeInstalled), + Status: metav1.ConditionTrue, + Reason: string(v1alpha1.ConditionReasonInstallation), + }, + }, + State: v1alpha1.StateReady, + }, + } +) + +func fixEmptyManifestCache() chart.ManifestCache { + return fixManifestCache("---") +} + +func fixManifestCache(manifest string) chart.ManifestCache { + cache := chart.NewInMemoryManifestCache() + _ = cache.Set(context.Background(), types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, chart.DockerRegistrySpecManifest{Manifest: manifest, CustomFlags: map[string]interface{}{}}) + + return cache +} + +func requireEqualFunc(t *testing.T, expected, actual stateFn) { + require.NotNil(t, actual) + + expectedFnName := getFnName(expected) + actualFnName := getFnName(actual) + + if expectedFnName == actualFnName { + // return if functions are simply same + return + } + + expectedElems := strings.Split(expectedFnName, "/") + actualElems := strings.Split(actualFnName, "/") + + // check package paths (prefix) + require.Equal(t, + strings.Join(expectedElems[0:len(expectedElems)-2], "/"), + strings.Join(actualElems[0:len(actualElems)-2], "/"), + ) + + // check direct fn names (suffix) + require.Equal(t, + getDirectFnName(expectedElems[len(expectedElems)-1]), + getDirectFnName(actualElems[len(actualElems)-1]), + ) +} + +func getDirectFnName(nameSuffix string) string { + elements := strings.Split(nameSuffix, ".") + for i := range elements { + elemI := len(elements) - i - 1 + if !strings.HasPrefix(elements[elemI], "func") { + return elements[elemI] + } + } + + return "" +} + +func getFnName(fn stateFn) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +func requireContainsCondition(t *testing.T, status v1alpha1.DockerRegistryStatus, + conditionType v1alpha1.ConditionType, conditionStatus metav1.ConditionStatus, conditionReason v1alpha1.ConditionReason, conditionMessage string) { + hasExpectedCondition := false + for _, condition := range status.Conditions { + if condition.Type == string(conditionType) { + require.Equal(t, string(conditionReason), condition.Reason) + require.Equal(t, conditionStatus, condition.Status) + require.Equal(t, conditionMessage, condition.Message) + hasExpectedCondition = true + } + } + require.True(t, hasExpectedCondition) +} diff --git a/components/operator/internal/state/update_status.go b/components/operator/internal/state/update_status.go new file mode 100644 index 00000000..dffea574 --- /dev/null +++ b/components/operator/internal/state/update_status.go @@ -0,0 +1,25 @@ +package state + +import ( + "context" + "reflect" + "time" +) + +var ( + requeueDuration = time.Second * 3 +) + +func updateDockerRegistryWithoutStatus(ctx context.Context, r *reconciler, s *systemState) error { + return r.client.Update(ctx, &s.instance) +} + +func updateDockerRegistryStatus(ctx context.Context, r *reconciler, s *systemState) error { + if !reflect.DeepEqual(s.instance.Status, s.statusSnapshot) { + err := r.client.Status().Update(ctx, &s.instance) + 
emitEvent(r, s) + s.saveStatusSnapshot() + return err + } + return nil +} diff --git a/components/operator/internal/state/utils.go b/components/operator/internal/state/utils.go new file mode 100644 index 00000000..ee04ab2e --- /dev/null +++ b/components/operator/internal/state/utils.go @@ -0,0 +1,46 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/pkg/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func GetDockerRegistryOrServed(ctx context.Context, req ctrl.Request, c client.Client) (*v1alpha1.DockerRegistry, error) { + instance := &v1alpha1.DockerRegistry{} + err := c.Get(ctx, req.NamespacedName, instance) + if err == nil { + return instance, nil + } + if !k8serrors.IsNotFound(err) { + return nil, errors.Wrap(err, "while fetching dockerregistry instance") + } + + instance, err = GetServedDockerRegistry(ctx, c) + if err != nil { + return nil, errors.Wrap(err, "while fetching served dockerregistry instance") + } + return instance, nil +} + +func GetServedDockerRegistry(ctx context.Context, c client.Client) (*v1alpha1.DockerRegistry, error) { + var dockerRegistryList v1alpha1.DockerRegistryList + + err := c.List(ctx, &dockerRegistryList) + + if err != nil { + return nil, err + } + + for _, item := range dockerRegistryList.Items { + if !item.IsServedEmpty() && item.Status.Served == v1alpha1.ServedTrue { + return &item, nil + } + } + + return nil, nil +} diff --git a/components/operator/internal/state/verify.go b/components/operator/internal/state/verify.go new file mode 100644 index 00000000..d326657e --- /dev/null +++ b/components/operator/internal/state/verify.go @@ -0,0 +1,49 @@ +package state + +import ( + "context" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/internal/chart" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// verify if all workloads are in ready state +func sFnVerifyResources(_ context.Context, r *reconciler, s *systemState) (stateFn, *ctrl.Result, error) { + ready, err := chart.Verify(s.chartConfig) + if err != nil { + r.log.Warnf("error while verifying resource %s: %s", + client.ObjectKeyFromObject(&s.instance), err.Error()) + s.setState(v1alpha1.StateError) + s.instance.UpdateConditionFalse( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstallationErr, + err, + ) + return stopWithEventualError(err) + } + + if !ready { + return requeueAfter(requeueDuration) + } + + warning := s.warningBuilder.Build() + if warning != "" { + s.setState(v1alpha1.StateWarning) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstalled, + warning, + ) + return stop() + } + + s.setState(v1alpha1.StateReady) + s.instance.UpdateConditionTrue( + v1alpha1.ConditionTypeInstalled, + v1alpha1.ConditionReasonInstalled, + "DockerRegistry installed", + ) + return stop() +} diff --git a/components/operator/internal/state/verify_test.go b/components/operator/internal/state/verify_test.go new file mode 100644 index 00000000..f7782896 --- /dev/null +++ b/components/operator/internal/state/verify_test.go @@ -0,0 +1,178 @@ +package state + +import ( + "context" + "testing" + + "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + 
"github.com/kyma-project/docker-registry/components/operator/internal/chart" + "github.com/kyma-project/docker-registry/components/operator/internal/warning" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + testDeployCR = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deploy", + Namespace: "default", + }, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionUnknown, + }, + }, + }, + } +) + +const ( + testDeployManifest = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deploy + namespace: default +` +) + +func Test_sFnVerifyResources(t *testing.T) { + t.Run("ready", func(t *testing.T) { + s := &systemState{ + warningBuilder: warning.NewBuilder(), + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + k8s: k8s{ + client: fake.NewClientBuilder().Build(), + }, + } + + // verify and return update condition state + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateReady, status.State) + require.Len(t, status.Conditions, 2) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionTrue, + v1alpha1.ConditionReasonInstalled, + "DockerRegistry installed", + ) + }) + + t.Run("warning", func(t *testing.T) { + s := &systemState{ + warningBuilder: warning.NewBuilder().With("test warning"), + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixEmptyManifestCache(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + // verify and return update condition state + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.Nil(t, err) + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateWarning, status.State) + requireContainsCondition(t, status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionTrue, + v1alpha1.ConditionReasonInstalled, + s.warningBuilder.Build(), + ) + }) + + t.Run("verify error", func(t *testing.T) { + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: fixManifestCache("\t"), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + }, + } + r := &reconciler{ + log: zap.NewNop().Sugar(), + } + + // handle verify err and update condition with err + next, result, err := sFnVerifyResources(context.Background(), r, s) + require.EqualError(t, err, "could not parse chart manifest: yaml: found character that cannot start any token") + require.Nil(t, result) + require.Nil(t, next) + + status := s.instance.Status + require.Equal(t, v1alpha1.StateError, status.State) + requireContainsCondition(t, 
status, + v1alpha1.ConditionTypeInstalled, + metav1.ConditionFalse, + v1alpha1.ConditionReasonInstallationErr, + "could not parse chart manifest: yaml: found character that cannot start any token", + ) + }) + + t.Run("requeue when resources are not ready", func(t *testing.T) { + client := fake.NewClientBuilder().WithObjects(testDeployCR).Build() + s := &systemState{ + instance: *testInstalledDockerRegistry.DeepCopy(), + chartConfig: &chart.Config{ + Cache: func() chart.ManifestCache { + cache := chart.NewInMemoryManifestCache() + _ = cache.Set(context.Background(), types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, chart.DockerRegistrySpecManifest{Manifest: testDeployManifest}) + return cache + }(), + CacheKey: types.NamespacedName{ + Name: testInstalledDockerRegistry.GetName(), + Namespace: testInstalledDockerRegistry.GetNamespace(), + }, + Cluster: chart.Cluster{ + Client: client, + }, + }, + } + r := &reconciler{} + + // return requeue on verification failed + next, result, err := sFnVerifyResources(context.Background(), r, s) + + _, expectedResult, _ := requeueAfter(requeueDuration) + require.NoError(t, err) + require.Equal(t, expectedResult, result) + require.Nil(t, next) + }) +} diff --git a/components/operator/internal/tracing/watcher.go b/components/operator/internal/tracing/watcher.go new file mode 100644 index 00000000..15397c69 --- /dev/null +++ b/components/operator/internal/tracing/watcher.go @@ -0,0 +1,57 @@ +package tracing + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + tracingOTLPService = "telemetry-otlp-traces" +) + +type eventHandler struct{} + +func (e eventHandler) Create(_ context.Context, event event.CreateEvent, q workqueue.RateLimitingInterface) { + if event.Object == nil { + return + } + svcName := event.Object.GetName() + if svcName != tracingOTLPService { + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: event.Object.GetName(), + Namespace: event.Object.GetNamespace(), + }}) +} + +func (e eventHandler) Update(_ context.Context, _ event.UpdateEvent, _ workqueue.RateLimitingInterface) { +} + +func (e eventHandler) Delete(_ context.Context, event event.DeleteEvent, q workqueue.RateLimitingInterface) { + if event.Object == nil { + return + } + svcName := event.Object.GetName() + if svcName != tracingOTLPService { + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: event.Object.GetName(), + Namespace: event.Object.GetNamespace(), + }}) +} + +func (e eventHandler) Generic(_ context.Context, _ event.GenericEvent, _ workqueue.RateLimitingInterface) { +} + +var _ handler.EventHandler = eventHandler{} + +func ServiceCollectorWatcher() handler.EventHandler { + return &eventHandler{} +} diff --git a/components/operator/internal/warning/warning.go b/components/operator/internal/warning/warning.go new file mode 100644 index 00000000..44a32f76 --- /dev/null +++ b/components/operator/internal/warning/warning.go @@ -0,0 +1,27 @@ +package warning + +import ( + "fmt" + "strings" +) + +type Builder struct { + warnings []string +} + +func NewBuilder() *Builder { + return &Builder{} +} + +func (w *Builder) With(warning string) *Builder { + w.warnings = append(w.warnings, warning) + return w +} + +func (w *Builder) Build() string { + msg 
:= "" + if len(w.warnings) > 0 { + msg = fmt.Sprintf("Warning: %s", strings.Join(w.warnings, "; ")) + } + return msg +} diff --git a/components/operator/internal/warning/warning_test.go b/components/operator/internal/warning/warning_test.go new file mode 100644 index 00000000..c34d0bfa --- /dev/null +++ b/components/operator/internal/warning/warning_test.go @@ -0,0 +1,22 @@ +package warning + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuilder_Build(t *testing.T) { + t.Run("build multiple warnings", func(t *testing.T) { + warning := NewBuilder(). + With("warn 1"). + With("warn 2"). + Build() + + require.Equal(t, "Warning: warn 1; warn 2", warning) + }) + t.Run("build empty warning", func(t *testing.T) { + warning := NewBuilder().Build() + require.Equal(t, "", warning) + }) +} diff --git a/components/operator/main.go b/components/operator/main.go new file mode 100644 index 00000000..67b8d80f --- /dev/null +++ b/components/operator/main.go @@ -0,0 +1,218 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "github.com/kyma-project/docker-registry/components/operator/internal/registry" + "os" + "time" + + "github.com/kyma-project/docker-registry/components/operator/internal/config" + "github.com/kyma-project/docker-registry/components/operator/internal/gitrepository" + "github.com/pkg/errors" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ uberzap "go.uber.org/zap" + uberzapcore "go.uber.org/zap/zapcore" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + k8s "github.com/kyma-project/docker-registry/components/operator/internal/controllers/kubernetes" + internalresource "github.com/kyma-project/docker-registry/components/operator/internal/resource" + corev1 "k8s.io/api/core/v1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + operatorv1alpha1 "github.com/kyma-project/docker-registry/components/operator/api/v1alpha1" + "github.com/kyma-project/docker-registry/components/operator/controllers" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + syncPeriod = time.Minute * 30 + cleanupTimeout = time.Second * 10 +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(operatorv1alpha1.AddToScheme(scheme)) + + utilruntime.Must(apiextensionsscheme.AddToScheme(scheme)) + + //+kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + + opts := zap.Options{ + Development: true, + TimeEncoder: uberzapcore.TimeEncoderOfLayout("Jan 02 15:04:05.000000000"), + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + cfg, err := config.GetConfig("") + if err != nil { + setupLog.Error(err, "while getting config") + os.Exit(1) + } + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + ctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) + defer cancel() + + setupLog.Info("cleaning orphan deprecated resources") + err = cleanupOrphanDeprecatedResources(ctx) + if err != nil { + setupLog.Error(err, "while removing orphan resources") + os.Exit(1) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: ctrlmetrics.Options{ + BindAddress: metricsAddr, + }, + HealthProbeBindAddress: probeAddr, + Cache: ctrlcache.Options{ + SyncPeriod: &syncPeriod, + }, + Client: ctrlclient.Options{ + Cache: &ctrlclient.CacheOptions{ + DisableFor: []ctrlclient.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + }, + }, + }, + // TODO: use our own logger - now eventing use logger with different message format + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + config := uberzap.NewDevelopmentConfig() + config.EncoderConfig.TimeKey = "timestamp" + config.EncoderConfig.EncodeTime = opts.TimeEncoder + config.DisableCaller = true + + reconcilerLogger, err := config.Build() + if err != nil { + setupLog.Error(err, "unable to setup logger") + os.Exit(1) + } + + reconciler := controllers.NewDockerRegistryReconciler( + mgr.GetClient(), mgr.GetConfig(), + mgr.GetEventRecorderFor("dockerregistry-operator"), + reconcilerLogger.Sugar(), + cfg.ChartPath) + + //TODO: get it from some configuration + configKubernetes := 
k8s.Config{
+		BaseNamespace:                 "kyma-system",
+		BaseDefaultSecretName:         registry.SecretName,
+		ExcludedNamespaces:            []string{"kyma-system"},
+		ConfigMapRequeueDuration:      time.Minute,
+		SecretRequeueDuration:         time.Minute,
+		ServiceAccountRequeueDuration: time.Minute,
+	}
+
+	resourceClient := internalresource.New(mgr.GetClient(), scheme)
+	secretSvc := k8s.NewSecretService(resourceClient, configKubernetes)
+	configMapSvc := k8s.NewConfigMapService(resourceClient, configKubernetes)
+	serviceAccountSvc := k8s.NewServiceAccountService(resourceClient, configKubernetes)
+
+	if err = reconciler.SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "DockerRegistry")
+		os.Exit(1)
+	}
+
+	namespaceLogger, err := config.Build()
+	if err != nil {
+		setupLog.Error(err, "unable to setup logger")
+		os.Exit(1)
+	}
+
+	if err := k8s.NewNamespace(mgr.GetClient(), namespaceLogger.Sugar(), configKubernetes, configMapSvc, secretSvc, serviceAccountSvc).
+		SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create Namespace controller")
+		os.Exit(1)
+	}
+
+	secretLogger, err := config.Build()
+	if err != nil {
+		setupLog.Error(err, "unable to setup logger")
+		os.Exit(1)
+	}
+
+	if err := k8s.NewSecret(mgr.GetClient(), secretLogger.Sugar(), configKubernetes, secretSvc).
+		SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create Secret controller")
+		os.Exit(1)
+	}
+	//+kubebuilder:scaffold:builder
+
+	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up health check")
+		os.Exit(1)
+	}
+	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to set up ready check")
+		os.Exit(1)
+	}
+
+	setupLog.Info("starting manager")
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
+
+func cleanupOrphanDeprecatedResources(ctx context.Context) error {
+	// We talk to the API server _before_ the manager is started.
+	// The default manager client reads from a cache that is not running yet
+	// and would return an error, so we create a "serverClient" that reads
+	// from the API server directly. It is used only here and runs only at
+	// startup, so it shouldn't be too much load for the API server.
+	serverClient, err := ctrlclient.New(ctrl.GetConfigOrDie(), ctrlclient.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to create a server client")
+	}
+
+	return gitrepository.Cleanup(ctx, serverClient)
+}
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 00000000..423459f8
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,7 @@
+# Samples Config
+configs:
+# TODO: Add optional manifest installation chart flags and value overrides
+# The format below should be followed
+# - name: nginx-ingress
+#   clientConfig: "CreateNamespace=true,Namespace=jakobs-new"
+#   overrides: "x=4"
\ No newline at end of file
diff --git a/config/docker-registry/.helmignore b/config/docker-registry/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/config/docker-registry/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/config/docker-registry/Chart.yaml b/config/docker-registry/Chart.yaml
new file mode 100644
index 00000000..8988fbf1
--- /dev/null
+++ b/config/docker-registry/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Kyma component 'docker registry'
+name: docker-registry
+version: 1.0.0
+home: https://kyma-project.io
+icon: https://github.com/kyma-project/kyma/blob/main/logo.png?raw=true
+dependencies:
+  - name: docker-registry
\ No newline at end of file
diff --git a/config/docker-registry/README.md b/config/docker-registry/README.md
new file mode 100644
index 00000000..01d334e7
--- /dev/null
+++ b/config/docker-registry/README.md
@@ -0,0 +1,15 @@
+# Docker Registry
+
+## Overview
+
+This project contains the chart for the Docker Registry module.
+
+> **NOTE**: This feature is experimental.
+
+## Prerequisites
+
+- Kubernetes cluster (v1.16.3)
+
+## Details
+
+To learn how the module is used in Kyma, see the [official documentation](https://kyma-project.io/#/serverless-manager/user/README.md).
diff --git a/config/docker-registry/charts/docker-registry/.helmignore b/config/docker-registry/charts/docker-registry/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/config/docker-registry/charts/docker-registry/Chart.yaml b/config/docker-registry/charts/docker-registry/Chart.yaml
new file mode 100644
index 00000000..c8e41309
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/Chart.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+description: A Helm chart for Docker Registry
+name: docker-registry
+version: 1.9.1
+appVersion: 2.7.1
+home: https://hub.docker.com/_/registry/
+icon: https://hub.docker.com/public/images/logos/mini-logo.svg
+sources:
+  - https://github.com/docker/distribution-library-image
+maintainers:
+  - name: jpds
+    email: jpds@protonmail.com
+  - name: rendhalver
+    email: pete.brown@powerhrg.com
diff --git a/config/docker-registry/charts/docker-registry/README.md b/config/docker-registry/charts/docker-registry/README.md
new file mode 100644
index 00000000..b9ff4612
--- /dev/null
+++ b/config/docker-registry/charts/docker-registry/README.md
@@ -0,0 +1,79 @@
+# Docker Registry Helm Chart
+
+This directory contains a Kubernetes chart to deploy a private Docker Registry.
+
+## Prerequisites
+
+* Persistent Volume (PV) support on the underlying infrastructure (if persistence is required)
+
+## Chart Details
+
+This chart implements the Docker Registry deployment.
+
+## Installing the Chart
+
+To install the chart, use the following command:
+
+```bash
+helm install stable/docker-registry
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the `docker-registry` chart and
+their default values.
+
+| Parameter                   | Description                                                                                  | Default         |
+|:----------------------------|:---------------------------------------------------------------------------------------------|:----------------|
+| `image.pullPolicy`          | Container pull policy                                                                        | `IfNotPresent`  |
+| `image.repository`          | Container image to use                                                                       | `registry`      |
+| `image.tag`                 | Container image tag to deploy                                                                | `2.7.1`         |
+| `persistence.accessMode`    | Access mode to use for PVC                                                                   | `ReadWriteOnce` |
+| `persistence.enabled`       | Whether to use a PVC for the Docker storage                                                  | `false`         |
+| `persistence.deleteEnabled` | Enable the deletion of image blobs and manifests by digest                                   | `nil`           |
+| `persistence.size`          | Amount of space to claim for PVC                                                             | `10Gi`          |
+| `persistence.storageClass`  | Storage Class to use for PVC                                                                 | `-`             |
+| `persistence.existingClaim` | Name of an existing PVC to use                                                               | `nil`           |
+| `service.port`              | TCP port on which the service is exposed                                                     | `5000`          |
+| `service.type`              | Service type                                                                                 | `ClusterIP`     |
+| `service.clusterIP`         | If `service.type` is `ClusterIP` and this is non-empty, sets the cluster IP of the service   | `nil`           |
+| `service.nodePort`          | If `service.type` is `NodePort` and this is non-empty, sets the node port of the service     | `nil`           |
+| `replicaCount`              | Number of Deployment replicas                                                                | `1`             |
+| `updateStrategy`            | Update strategy for the Deployment                                                           | `{}`            |
+| `podAnnotations`            | Annotations for Pod                                                                          | `{}`            |
+| `podLabels`                 | Labels for Pod                                                                               | `{}`            |
+| `podDisruptionBudget`       | Pod disruption budget                                                                        | `{}`            |
+| `resources.limits.cpu`      | Container CPU limit                                                                          | `nil`           |
+| `resources.limits.memory`   | Container memory limit                                                                       | `nil`           |
+| `storage`                   | Storage system to use                                                                        | `filesystem`    |
+| `tlsSecretName`             | Name of Secret for TLS certs                                                                 | `nil`           |
+| `secrets.htpasswd`          | Htpasswd authentication                                                                      | `nil`           |
+| `secrets.s3.accessKey`      | Access Key for S3 configuration                                                              | `nil`           |
+| `secrets.s3.secretKey`      | Secret Key for S3 configuration                                                              | `nil`           |
+| `secrets.swift.username`    | Username for Swift configuration                                                             | `nil`           |
+| `secrets.swift.password`    | Password for Swift configuration                                                             | `nil`           |
+| `haSharedSecret`            | Shared Secret for Registry                                                                   | `nil`           |
+| `configData`                | Registry configuration, rendered into `config.yml`                                           | `nil`           |
+| `s3.region`                 | S3 region                                                                                    | `nil`           |
+| `s3.regionEndpoint`         | S3 region endpoint                                                                           | `nil`           |
+| `s3.bucket`                 | S3 bucket name                                                                               | `nil`           |
+| `s3.encrypt`                | Store images in encrypted format                                                             | `nil`           |
+| `s3.secure`                 | Use HTTPS                                                                                    | `nil`           |
+| `swift.authurl`             | Swift authurl                                                                                | `nil`           |
+| `swift.container`           | Swift container                                                                              | `nil`           |
+| `nodeSelector`              | Node labels for Pod assignment                                                               | `{}`            |
+| `tolerations`               | Pod tolerations                                                                              | `[]`            |
+| `ingress.enabled`           | If true, Ingress will be created                                                             | `false`         |
+| `ingress.annotations`       | Ingress annotations                                                                          | `{}`            |
+| `ingress.labels`            | Ingress labels                                                                               | `{}`            |
+| `ingress.path`              | Ingress service path                                                                         | `/`             |
+| `ingress.hosts`             | Ingress hostnames                                                                            | `[]`            |
+| `ingress.tls`               | Ingress TLS configuration (YAML)                                                             | `[]`            |
+| `extraVolumeMounts`         | Additional volumeMounts for the registry container                                           | `[]`            |
+| `extraVolumes`              | Additional volumes for the Pod                                                               | `[]`            |
+
+Specify each parameter using the `--set key=value[,key=value]` argument with
+`helm install`.
+
+To generate the htpasswd file, run this Docker command:
+`docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd`.
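+
+For example, a minimal sketch of an install that enables persistence and seeds
+the generated htpasswd entry (the value choices below are illustrative, not
+chart defaults; adjust them to your environment):
+
+```bash
+# generate an htpasswd entry for user/password, as shown above
+docker run --entrypoint htpasswd registry:2 -Bbn user password > ./htpasswd
+
+# install the chart with value overrides
+helm install stable/docker-registry \
+  --set persistence.enabled=true \
+  --set persistence.size=10Gi \
+  --set secrets.htpasswd="$(cat ./htpasswd)"
+```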
diff --git a/config/docker-registry/charts/docker-registry/templates/_helpers.tpl b/config/docker-registry/charts/docker-registry/templates/_helpers.tpl new file mode 100644 index 00000000..a91077ef --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/_helpers.tpl @@ -0,0 +1,24 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "docker-registry.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "docker-registry.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/configmap.yaml b/config/docker-registry/charts/docker-registry/templates/configmap.yaml new file mode 100644 index 00000000..08ee9011 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "docker-registry.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + config.yml: |- +{{ toYaml .Values.configData | indent 4 }} diff --git a/config/docker-registry/charts/docker-registry/templates/deployment.yaml b/config/docker-registry/charts/docker-registry/templates/deployment.yaml new file mode 100644 index 00000000..338ca576 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/deployment.yaml @@ -0,0 +1,231 @@ +{{- $rollme := include "tplValue" ( dict "value" .Values.rollme "context" . ) -}} +{{- $registryHTTPSecret := include "tplValue" ( dict "value" .Values.registryHTTPSecret "context" . ) -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + replicas: {{ .Values.replicaCount }} + strategy: + type: Recreate + rollingUpdate: null + minReadySeconds: 5 + template: + metadata: + labels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: + rollme: {{ $rollme | quote }} +{{- if $.Values.podAnnotations }} +{{ toYaml $.Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + priorityClassName: "{{ .Values.global.dockerregistryPriorityClassName }}" +{{- if .Values.pod.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.pod.securityContext "context" . 
) | nindent 12 }} +{{- end }} + hostNetwork: false # Optional. The default is false if the entry is not there. + hostPID: false # Optional. The default is false if the entry is not there. + hostIPC: false # Optional. The default is false if the entry is not there. + initContainers: + - name: generate-htpasswd + image: "{{ include "imageurl" (dict "reg" .Values.global.containerRegistry "img" .Values.global.images.registry) }}" +{{- if .Values.initContainers.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.initContainers.securityContext "context" . ) | nindent 12 }} +{{- end }} + volumeMounts: + {{- if eq .Values.storage "filesystem" }} + - name: data + mountPath: /var/lib/registry/ + {{- end }} + - name: registry-credentials + mountPath: /regcred + readOnly: true + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + command: + - sh + - -ec + - | + htpasswd -Bbn $(cat /regcred/username.txt) $(cat /regcred/password.txt) > ./data/htpasswd + echo "Generated htpasswd file for docker-registry..." +{{- if eq .Values.storage "filesystem" }} + chown -R 1000:1000 "/var/lib/registry/" +{{- end }} + + containers: + - name: {{ .Chart.Name }} + image: "{{ include "imageurl" (dict "reg" .Values.global.containerRegistry "img" .Values.global.images.registry) }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if .Values.containers.securityContext }} + securityContext: + {{- include "tplValue" ( dict "value" .Values.containers.securityContext "context" . ) | nindent 12 }} +{{- end }} + command: + - /bin/registry + - serve + - /etc/docker/registry/config.yml + ports: + - containerPort: 5000 + livenessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + readinessProbe: + httpGet: +{{- if .Values.tlsSecretName }} + scheme: HTTPS +{{- end }} + path: / + port: 5000 + resources: +{{ toYaml .Values.resources | indent 12 }} + env: + - name: REGISTRY_AUTH + value: "htpasswd" + - name: REGISTRY_AUTH_HTPASSWD_REALM + value: "Registry Realm" + - name: REGISTRY_AUTH_HTPASSWD_PATH + value: "/data/htpasswd" + - name: REGISTRY_HTTP_SECRET + # https://docs.docker.com/registry/configuration/#http, there's no problem that it is plainly seen + # using kubectl describe + value: {{ $registryHTTPSecret | quote }} +{{- if .Values.tlsSecretName }} + - name: REGISTRY_HTTP_TLS_CERTIFICATE + value: /etc/ssl/docker/tls.crt + - name: REGISTRY_HTTP_TLS_KEY + value: /etc/ssl/docker/tls.key +{{- end }} +{{- if eq .Values.storage "filesystem" }} + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: "/var/lib/registry" +{{- else if eq .Values.storage "azure" }} + - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountName + - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureAccountKey + - name: REGISTRY_STORAGE_AZURE_CONTAINER + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: azureContainer +{{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + - name: REGISTRY_STORAGE_S3_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: s3AccessKey + - name: REGISTRY_STORAGE_S3_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . 
}}-secret + key: s3SecretKey + {{- end }} + - name: REGISTRY_STORAGE_S3_REGION + value: {{ required ".Values.s3.region is required" .Values.s3.region }} + {{- if .Values.s3.regionEndpoint }} + - name: REGISTRY_STORAGE_S3_REGIONENDPOINT + value: {{ .Values.s3.regionEndpoint }} + {{- end }} + - name: REGISTRY_STORAGE_S3_BUCKET + value: {{ required ".Values.s3.bucket is required" .Values.s3.bucket }} + {{- if .Values.s3.encrypt }} + - name: REGISTRY_STORAGE_S3_ENCRYPT + value: {{ .Values.s3.encrypt | quote }} + {{- end }} + {{- if .Values.s3.secure }} + - name: REGISTRY_STORAGE_S3_SECURE + value: {{ .Values.s3.secure | quote }} + {{- end }} +{{- else if eq .Values.storage "swift" }} + - name: REGISTRY_STORAGE_SWIFT_AUTHURL + value: {{ required ".Values.swift.authurl is required" .Values.swift.authurl }} + - name: REGISTRY_STORAGE_SWIFT_USERNAME + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftUsername + - name: REGISTRY_STORAGE_SWIFT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "docker-registry.fullname" . }}-secret + key: swiftPassword + - name: REGISTRY_STORAGE_SWIFT_CONTAINER + value: {{ required ".Values.swift.container is required" .Values.swift.container }} +{{- end }} + volumeMounts: +{{- if eq .Values.storage "filesystem" }} + - name: data + mountPath: /var/lib/registry/ +{{- end }} + - name: "{{ template "docker-registry.fullname" . }}-config" + mountPath: "/etc/docker/registry" +{{- if .Values.tlsSecretName }} + - mountPath: /etc/ssl/docker + name: tls-cert + readOnly: true +{{- end }} +{{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} +{{- end }} + +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} + volumes: +{{- if eq .Values.storage "filesystem" }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "docker-registry.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} + - name: {{ template "docker-registry.fullname" . }}-config + configMap: + name: {{ template "docker-registry.fullname" . }}-config +{{- if .Values.tlsSecretName }} + - name: tls-cert + secret: + secretName: {{ .Values.tlsSecretName }} +{{- end }} +{{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} +{{- end }} diff --git a/config/docker-registry/charts/docker-registry/templates/ingress.yaml b/config/docker-registry/charts/docker-registry/templates/ingress.yaml new file mode 100644 index 00000000..3060a29f --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/ingress.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "docker-registry.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $path := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: {{ $path }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml b/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000..fa5f1a7c --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "docker-registry.name" . }} + release: {{ .Release.Name }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml b/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml new file mode 100644 index 00000000..4717d7d2 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/priorityclass.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ .Values.global.dockerregistryPriorityClassName }} +value: {{ .Values.global.dockerregistryPriorityClassValue }} +globalDefault: false +description: "Scheduling priority of dockerregistry components. By default, dockerregistry components should not be blocked by unschedulable user workloads." \ No newline at end of file diff --git a/config/docker-registry/charts/docker-registry/templates/pvc.yaml b/config/docker-registry/charts/docker-registry/templates/pvc.yaml new file mode 100644 index 00000000..96da5061 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if .Values.persistence.enabled }} +{{- if not .Values.persistence.existingClaim -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- if .Values.persistence.storageClass }} +{{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/config/docker-registry/charts/docker-registry/templates/secret.yaml b/config/docker-registry/charts/docker-registry/templates/secret.yaml new file mode 100644 index 00000000..c5e04ab3 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/secret.yaml @@ -0,0 +1,31 @@ +{{- if or (eq .Values.storage "azure") (eq .Values.storage "s3") (eq .Values.storage "swift") }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "docker-registry.fullname" . }}-secret + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if eq .Values.storage "azure" }} + {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }} + azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }} + azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }} + azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "s3" }} + {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }} + s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }} + s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }} + {{- end }} + {{- else if eq .Values.storage "swift" }} + {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }} + swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }} + swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }} + {{- end }} + {{- end }} + {{- end}} \ No newline at end of file diff --git a/config/docker-registry/charts/docker-registry/templates/service.yaml b/config/docker-registry/charts/docker-registry/templates/service.yaml new file mode 100644 index 00000000..cda1a453 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/templates/service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "docker-registry.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "docker-registry.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: NodePort + ports: + - port: {{ .Values.global.registryServicePort }} + protocol: TCP + name: http-{{ .Values.service.name }} + targetPort: {{ .Values.global.registryServicePort }} + nodePort: {{ .Values.global.registryNodePort }} + selector: + app: {{ template "docker-registry.name" . 
}} + release: {{ .Release.Name }} diff --git a/config/docker-registry/charts/docker-registry/values.yaml b/config/docker-registry/charts/docker-registry/values.yaml new file mode 100644 index 00000000..62756567 --- /dev/null +++ b/config/docker-registry/charts/docker-registry/values.yaml @@ -0,0 +1,171 @@ +# Default values for docker-registry. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 + +updateStrategy: + type: Recreate + rollingUpdate: null + # maxSurge: 1 + # maxUnavailable: 0 + +image: + pullPolicy: IfNotPresent +# imagePullSecrets: + # - name: docker +service: + name: registry + port: "{{ .Values.global.registryServicePort }}" # same as configData.http.addr + annotations: {} +ingress: + enabled: false + path: / + # Used to create an Ingress record. + hosts: + - chart-example.local + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 400m + memory: 800Mi + requests: + cpu: 10m + memory: 300Mi + +podAnnotations: + sidecar.istio.io/inject: "false" +podLabels: {} + +persistence: + accessMode: 'ReadWriteOnce' + enabled: true + size: 20Gi + # storageClass: '-' + +# set the type of filesystem to use: filesystem, s3. +# If filesystem is used, you should also add it to configData, below +storage: filesystem + +# Set this to name of secret for tls certs +# tlsSecretName: registry.docker.example.com + +# Secrets for Azure +# azure: +# accountName: "" +# accountKey: "" +# container: "" +# Secrets for S3 access and secret keys +# s3: +# accessKey: "" +# secretKey: "" +# Secrets for Swift username and password +# swift: +# username: "" +# password: "" + +# Options for s3 storage type: +# s3: +# region: us-east-1 +# regionEndpoint: s3.us-east-1.amazonaws.com +# bucket: my-bucket +# encrypt: false +# secure: true + +# Options for swift storage type: +# swift: +# authurl: http://swift.example.com/ +# container: my-container + +# https://docs.docker.com/registry/configuration/ +configData: # example: https://github.com/docker/distribution/blob/master/cmd/registry/config-dev.yml + version: 0.1 + log: + formatter: json + fields: + service: registry + storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry + http: + addr: :5000 # same as .Values.service.port + headers: + X-Content-Type-Options: [nosniff] + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +containers: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + procMount: default # Optional. The default is false if the entry is not there. 
+ readOnlyRootFilesystem: true # Mandatory + +initContainers: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + # this is required to allow the initContainer to chmod the volumemount for the registry storage volume. This is incompatible with the security requirements above and should be fixed in the future. + runAsUser: 0 + runAsGroup: 0 + privileged: false + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + add: ["CHOWN"] + procMount: default # Optional. The default is false if the entry is not there. + readOnlyRootFilesystem: true # Mandatory + +pod: + # the following guidelines should be followed for this https://github.com/kyma-project/community/tree/main/concepts/psp-replacement + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: # Optional. This option can also be set on container level but it is recommended to set it on Pod level and leave it undefined on container level. + type: RuntimeDefault + + +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 2 + +nodeSelector: {} + +tolerations: [] + +extraVolumeMounts: [] + +extraVolumes: [] + +nameOverride: +fullnameOverride: + +destinationRule: + enabled: true + +rollme: "" +registryHTTPSecret: "" diff --git a/config/docker-registry/templates/_helpers.tpl b/config/docker-registry/templates/_helpers.tpl new file mode 100644 index 00000000..8fc39071 --- /dev/null +++ b/config/docker-registry/templates/_helpers.tpl @@ -0,0 +1,49 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{- include "tplValue" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +*/}} +{{- define "tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "registry-fullname" -}} +{{- "internal-docker-registry" -}} +{{- end -}} + + +{{/* +Create a URL for container images +*/}} +{{- define "imageurl" -}} +{{- $registry := default $.reg.path $.img.containerRegistryPath -}} +{{- $path := ternary (print $registry) (print $registry "/" $.img.directory) (empty $.img.directory) -}} +{{- $version := ternary (print ":" $.img.version) (print "@sha256:" $.img.sha) (empty $.img.sha) -}} +{{- print $path "/" $.img.name $version -}} +{{- end -}} diff --git a/config/docker-registry/templates/registry-config.yaml b/config/docker-registry/templates/registry-config.yaml new file mode 100644 index 00000000..b099644f --- /dev/null +++ b/config/docker-registry/templates/registry-config.yaml @@ -0,0 +1,21 @@ +{{- $username := include "tplValue" ( dict "value" .Values.dockerRegistry.username "context" . ) -}} +{{- $password := include "tplValue" ( dict "value" .Values.dockerRegistry.password "context" . ) -}} +{{- $encodedUsernamePassword := printf "%s:%s" $username $password | b64enc }} +{{- $internalRegPullAddr := printf "localhost:%d" (int .Values.global.registryNodePort) }} +{{- $internalRegPushAddr := printf "%s.%s.svc.cluster.local:%d" (include "registry-fullname" . ) .Release.Namespace ( int .Values.global.registryServicePort) }} + +apiVersion: v1 +kind: Secret +type: kubernetes.io/dockerconfigjson +metadata: + name: internal-dockerregistry-config + namespace: {{ .Release.Namespace }} + labels: + dockerregistry.kyma-project.io/config: credentials +data: + username: "{{ $username | b64enc }}" + password: "{{ $password | b64enc }}" + isInternal: {{ "true" | b64enc }} + pullRegAddr: {{ $internalRegPullAddr | b64enc }} + pushRegAddr: "{{ $internalRegPushAddr | b64enc }}" + .dockerconfigjson: "{{- (printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}, \"%s\": {\"auth\": \"%s\"}}}" $internalRegPushAddr $encodedUsernamePassword $internalRegPullAddr $encodedUsernamePassword) | b64enc }}" diff --git a/config/docker-registry/values.yaml b/config/docker-registry/values.yaml new file mode 100644 index 00000000..ffc9f205 --- /dev/null +++ b/config/docker-registry/values.yaml @@ -0,0 +1,47 @@ +# Default values for dockerregistry. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +fullnameOverride: "dockerregistry" +global: + registryServicePort: 5000 + registryNodePort: 32137 + containerRegistry: + path: europe-docker.pkg.dev/kyma-project + images: + registry: + name: "tpi/registry" + version: "2.8.1-1ae4c190" + directory: "prod" + dockerregistryPriorityClassValue: 2000000 + dockerregistryPriorityClassName: "dockerregistry-priority" +dockerRegistry: + username: "{{ randAlphaNum 20 | b64enc }}" # for gcr "_json_key" + password: "{{ randAlphaNum 40 | b64enc }}" # for gcr data from json key + # This is the registry address, for dockerhub it's username, for other it's url. + registryAddress: "" + # This is the server address of the registry which will be used to create docker configuration. 
+ serverAddress: "" +docker-registry: + fullnameOverride: "internal-docker-registry" + destinationRule: + enabled: true + secrets: + haSharedSecret: "secret" + htpasswd: "generated-in-init-container" + extraVolumeMounts: + - name: htpasswd-data + mountPath: /data + extraVolumes: + - name: registry-credentials + secret: + secretName: internal-dockerregistry-config + items: + - key: username + path: username.txt + - key: password + path: password.txt + - name: htpasswd-data + emptyDir: {} + rollme: "{{ randAlphaNum 5}}" + registryHTTPSecret: "{{ randAlphaNum 16 | b64enc }}" diff --git a/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml b/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml new file mode 100644 index 00000000..088e8e70 --- /dev/null +++ b/config/operator/base/crd/bases/operator.kyma-project.io_dockerregistries.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: dockerregistries.operator.kyma-project.io +spec: + group: operator.kyma-project.io + names: + kind: DockerRegistry + listKind: DockerRegistryList + plural: dockerregistries + singular: dockerregistry + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Configured')].status + name: Configured + type: string + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: Installed + type: string + - jsonPath: .metadata.generation + name: generation + type: integer + - jsonPath: .metadata.creationTimestamp + name: age + type: date + - jsonPath: .status.state + name: state + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DockerRegistry is the Schema for the dockerregistry API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DockerRegistrySpec defines the desired state of DockerRegistry + properties: + healthzLivenessTimeout: + description: Sets the timeout for the Function health check. The default + value in seconds is `10` + type: string + type: object + status: + properties: + conditions: + description: Conditions associated with CustomStatus. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + healthzLivenessTimeout: + type: string + secretName: + type: string + served: + description: |- + Served signifies that current DockerRegistry is managed. + Value can be one of ("True", "False"). + enum: + - "True" + - "False" + type: string + state: + description: |- + State signifies current state of DockerRegistry. + Value can be one of ("Ready", "Processing", "Error", "Deleting"). 
+ enum: + - Processing + - Deleting + - Ready + - Error + - Warning + type: string + required: + - served + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/operator/base/crd/kustomization.yaml b/config/operator/base/crd/kustomization.yaml new file mode 100644 index 00000000..db1457e0 --- /dev/null +++ b/config/operator/base/crd/kustomization.yaml @@ -0,0 +1,10 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/operator.kyma-project.io_dockerregistries.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/operator/base/crd/kustomizeconfig.yaml b/config/operator/base/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/config/operator/base/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/operator/base/deployment/deployment.yaml b/config/operator/base/deployment/deployment.yaml new file mode 100644 index 00000000..33436299 --- /dev/null +++ b/config/operator/base/deployment/deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operator + namespace: system + labels: + control-plane: operator + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: dockerregistry-operator + app.kubernetes.io/component: operator + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: operator + sidecar.istio.io/inject: "false" + spec: + securityContext: + runAsNonRoot: true + containers: + - command: + - /operator + image: controller:latest + name: manager + env: + - name: DOCKERREGISTRY_MANAGER_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: operator + terminationGracePeriodSeconds: 10 diff --git a/config/operator/base/deployment/kustomization.yaml b/config/operator/base/deployment/kustomization.yaml new file mode 100644 index 00000000..2d833f82 --- /dev/null +++ b/config/operator/base/deployment/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- deployment.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: 
dockerregistry-operator + newTag: 2d332b272e2a diff --git a/config/operator/base/kustomization.yaml b/config/operator/base/kustomization.yaml new file mode 100644 index 00000000..95f2c701 --- /dev/null +++ b/config/operator/base/kustomization.yaml @@ -0,0 +1,20 @@ +# Adds namespace to all resources. +namespace: kyma-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: dockerregistry- + +# Labels to add to all resources and selectors. +commonLabels: + app.kubernetes.io/component: dockerregistry-operator.kyma-project.io + + +resources: +- ./crd +- ./deployment +- ./rbac +- ./ui-extensions diff --git a/config/operator/base/rbac/editor_role.yaml b/config/operator/base/rbac/editor_role.yaml new file mode 100644 index 00000000..00c0c261 --- /dev/null +++ b/config/operator/base/rbac/editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit dockerregistry. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dockerregistry-operator-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-editor-role +rules: +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - get diff --git a/config/operator/base/rbac/kustomization.yaml b/config/operator/base/rbac/kustomization.yaml new file mode 100644 index 00000000..b508482b --- /dev/null +++ b/config/operator/base/rbac/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your operator will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. 
+- service_account.yaml +- role.yaml +- role_binding.yaml + diff --git a/config/operator/base/rbac/role.yaml b/config/operator/base/rbac/role.yaml new file mode 100644 index 00000000..45f4f444 --- /dev/null +++ b/config/operator/base/rbac/role.yaml @@ -0,0 +1,288 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments/status + verbs: + - get +- apiGroups: + - apps + resources: + - replicasets + verbs: + - list +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs/status + verbs: + - get +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - deletecollection + - get + - list + - 
patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - scheduling.k8s.io + resources: + - priorityclasses + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/operator/base/rbac/role_binding.yaml b/config/operator/base/rbac/role_binding.yaml new file mode 100644 index 00000000..3a699c7d --- /dev/null +++ b/config/operator/base/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: dockerregistry-operator-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-role +subjects: +- kind: ServiceAccount + name: operator + namespace: system diff --git a/config/operator/base/rbac/service_account.yaml b/config/operator/base/rbac/service_account.yaml new file mode 100644 index 00000000..c60ca4da --- /dev/null +++ b/config/operator/base/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: dockerregistry-operator-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator + namespace: system diff --git a/config/operator/base/rbac/viewer_role.yaml b/config/operator/base/rbac/viewer_role.yaml new file mode 100644 index 00000000..61a95ad7 --- /dev/null +++ b/config/operator/base/rbac/viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view dockerregistry. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dockerregistry-operator-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: dockerregistry-operator + app.kubernetes.io/part-of: dockerregistry-operator + app.kubernetes.io/managed-by: kustomize + name: operator-viewer-role +rules: +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries + verbs: + - get + - list + - watch +- apiGroups: + - operator.kyma-project.io + resources: + - dockerregistries/status + verbs: + - get diff --git a/config/operator/base/ui-extensions/dockerregistry/details b/config/operator/base/ui-extensions/dockerregistry/details new file mode 100644 index 00000000..d5cee36c --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/details @@ -0,0 +1,91 @@ +header: + - name: Ready + source: status.state + widget: Badge + highlights: + positive: + - 'Ready' +body: + - name: Desired Specification + widget: Panel + children: + - name: Docker Registry + visibility: $root.spec.dockerRegistry.enableInternal = true + source: spec.dockerRegistry.enableInternal?"INTERNAL":"" + - name: Docker Registry + visibility: '$exists($value)' + source: spec.dockerRegistry.secretName + widget: ResourceLink + resource: + name: spec.dockerRegistry.secretName + namespace: $root.metadata.namespace + kind: "'Secret'" + - name: Eventing Endpoint + source: spec.eventing.endpoint + visibility: '$exists($value)' + - name: OTLP Trace Endpoint + source: spec.tracing.endpoint + visibility: '$exists($value)' + - name: Default Resources Preset (Build-time) + source: spec.defaultBuildJobPreset + visibility: '$exists($value)' + - name: Default Resources Preset (Runtime) + source: spec.defaultRuntimePodPreset + visibility: '$exists($value)' + - name: Custom Build Execution Args + source: spec.functionBuildExecutorArgs + visibility: '$exists($value)' + - name: Max Simultaneous Builds + source: spec.functionBuildMaxSimultaneousJobs + visibility: '$exists($value)' + - name: Function Request Body Limit [Mb] + source: spec.functionRequestBodyLimitMb + visibility: '$exists($value)' + - name: Function Timeout [Sec] + source: spec.functionTimeoutSec + visibility: '$exists($value)' + - name: Function Requeue Duration + source: spec.functionRequeueDuration + visibility: '$exists($value)' + - name: Controller Liveness Timeout + source: spec.healthzLivenessTimeout + visibility: '$exists($value)' + - name: Target CPU utilisation for HPA + source: spec.targetCPUUtilizationPercentage + visibility: '$exists($value)' + - name: Status + widget: Panel + children: + - name: Docker Registry + source: status.dockerRegistry + - name: Eventing Endpoint + source: status.eventingEndpoint + - name: OTLP Trace Endpoint + source: status.tracingEndpoint + + - source: status.conditions + widget: Table + name: Reconciliation Conditions + children: + - source: type + name: Type + - source: status + name: Status + widget: Badge + highlights: + positive: + - 'True' + negative: + - 'False' + - source: reason + name: Reason + - source: message + name: Message + - source: '$readableTimestamp(lastTransitionTime)' + name: Last transition + sort: true + + - widget: EventList + filter: '$matchEvents($$, $root.kind, $root.metadata.name)' + name: events + defaultType: information diff --git a/config/operator/base/ui-extensions/dockerregistry/form b/config/operator/base/ui-extensions/dockerregistry/form new file mode 100644 index 
00000000..90e23e78 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/form @@ -0,0 +1,25 @@ +- path: spec.dockerRegistry.enableInternal + simple: true + name: Enable Internal Docker Registry +- simple: true + widget: Alert + severity: warning + alert: "'Internal Docker Registry is not recommended for production grade installations'" + visibility: "$root.spec.dockerRegistry.enableInternal = true" +- path: spec.dockerRegistry.secretName + visibility: $root.spec.dockerRegistry.enableInternal != true + simple: true + widget: Resource + name: External Docker Registry Configuration + resource: + kind: Secret + version: v1 + scope: namespace +- path: spec.tracing.endpoint + name: OTLP Trace Endpoint + simple: true + required: false +- path: spec.eventing.endpoint + name: Eventing Endpoint + simple: true + required: false \ No newline at end of file diff --git a/config/operator/base/ui-extensions/dockerregistry/general b/config/operator/base/ui-extensions/dockerregistry/general new file mode 100644 index 00000000..a02076a6 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/general @@ -0,0 +1,15 @@ +resource: + kind: DockerRegistry + group: operator.kyma-project.io + version: v1alpha1 +urlPath: dockerregistries +category: Kyma +name: DockerRegistry +scope: namespace +features: + actions: + disableCreate: true + disableDelete: true +description: >- + {{[DockerRegistry CR](https://github.com/kyma-project/docker-registry/blob/main/config/samples/default-dockerregistry-cr.yaml)}} + specifies dockerregistry module. diff --git a/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml b/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml new file mode 100644 index 00000000..91a95ffb --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/kustomization.yaml @@ -0,0 +1,14 @@ +configMapGenerator: +- name: operator.kyma-project.io + namespace: kube-public + files: + - general + - form + - list + - details + options: + disableNameSuffixHash: true + labels: + app.kubernetes.io/name: dockerregistries.operator.kyma-project.io + busola.io/extension: resource + busola.io/extension-version: "0.5" \ No newline at end of file diff --git a/config/operator/base/ui-extensions/dockerregistry/list b/config/operator/base/ui-extensions/dockerregistry/list new file mode 100644 index 00000000..3c25b7c5 --- /dev/null +++ b/config/operator/base/ui-extensions/dockerregistry/list @@ -0,0 +1,6 @@ +- name: Ready + source: status.state + widget: Badge + highlights: + positive: + - 'Ready' \ No newline at end of file diff --git a/config/operator/base/ui-extensions/kustomization.yaml b/config/operator/base/ui-extensions/kustomization.yaml new file mode 100644 index 00000000..83221f54 --- /dev/null +++ b/config/operator/base/ui-extensions/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- dockerregistry \ No newline at end of file diff --git a/config/operator/dev/.gitignore b/config/operator/dev/.gitignore new file mode 100644 index 00000000..738a410d --- /dev/null +++ b/config/operator/dev/.gitignore @@ -0,0 +1 @@ +kustomization.yaml \ No newline at end of file diff --git a/config/operator/dev/kustomization.yaml.tpl b/config/operator/dev/kustomization.yaml.tpl new file mode 100644 index 00000000..412e9c94 --- /dev/null +++ b/config/operator/dev/kustomization.yaml.tpl @@ -0,0 +1,9 @@ +resources: +- ../base +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +# To overwrite image in base it has to point to the image in base kustomization.yaml 
+images: +- name: europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator + newName: local-registry + newTag: local diff --git a/config/samples/default-dockerregistry-cr.yaml b/config/samples/default-dockerregistry-cr.yaml new file mode 100644 index 00000000..9f9f1a1c --- /dev/null +++ b/config/samples/default-dockerregistry-cr.yaml @@ -0,0 +1,6 @@ +apiVersion: operator.kyma-project.io/v1alpha1 +kind: DockerRegistry +metadata: + name: default + namespace: kyma-system +spec: {} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 84aca7bc..00000000 --- a/docs/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Docs - -## Overview - -The `docs` folder contains two subfolders - `user` and `contributor`. - -The `user` subfolder contains the end-user documentation, which is displayed on the [Kyma website](https://kyma-project.io/#/). Depending on your module needs, the subfolder must include overview, usage, or technical reference documents. To display the content on the website properly, create a `_sidebar.md` file in the `user` subfolder and list the documents it contains there. For more information on how to publish user documentation, follow [this guide](https://github.com/kyma-project/community/blob/main/docs/guidelines/content-guidelines/01-user-docs.md). - -The `contributor` subfolder includes any developer-related documentation to help them manually install, develop, and operate a module. - -To have a common structure across all modules, all documents must be properly numbered according to the following structure: - -> **NOTE:** It is suggested to use the following titles if you have the content that matches them; otherwise use your own, more suitable titles, or simply skip the ones you find irrelevant. - - - 00-xx-overview - - 01-xx-tutorial/configuration - - 02-xx-usage - - 03-xx-troubleshooting - -where `xx` is the number of the given document. For example: - - ```bash - 00-00-overview-telemetry-manager - 00-10-overview-logs - 00-20-overview-traces - 00-30-overview-metrics - 01-10-configure-logs - 01-20-configure-traces - 01-30-configure-metrics - 02-10-use-logs - 02-20-use-traces - 02-30-use-metrics - (...) - ``` -> **NOTE:** Before introducing [docsify](https://docsify.js.org/#/?id=docsify), we agreed to use the `10`, `20`, `30` numbering. It was to help maintain the proper order of docs if they were rendered automatically on the website. With docsify, you manually add the content to the `_sidebar.md` file, and docs are displayed in the order you add them. However, this numbering is still recommended to have the unified structure of the docs in the module repositories. - -If you have other content that does not fit into the above topics, create your own 04-10-module-specific document(s). - -You can divide your documentation into subfolders to avoid having too many documents in one `docs/user` or `docs/contributor` folder. For example, if you have many technical reference documents, you can create a `technical reference` subfolder in `docs/user` and keep relevant documentation there. Each subfolder in the `user` folder must have its own `_sidebar.md` file with the links to the main module page and the list of docs it contains. \ No newline at end of file diff --git a/docs/assets/modular-serverless.svg b/docs/assets/modular-serverless.svg new file mode 100644 index 00000000..971e9080 --- /dev/null +++ b/docs/assets/modular-serverless.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: two "Serverless Module" panels. The Serverless Operator (Deployment) watches the Serverless CR (spec: <config>; status: health status, used otlpEndpoints, used publisherProxyEndpoint) and reconciles the serverless controller (Deployment, configuring its ENVs) and the docker registry; the controller watches Function CRs, reconciles them into function-runtime Deployments, and, if configured, reconciles against the Eventing CR (status: publisherProxyEndpoint) and the Telemetry CR (status: traceOTLPEndpoint, metricOTLPEndpoint); the operator also watches and reconciles/generates the CA for the serverless webhook (Deployment), which validates/converts.]
\ No newline at end of file diff --git a/docs/assets/svls-add-ssh-key.png b/docs/assets/svls-add-ssh-key.png new file mode 100644 index 00000000..32ddd1be Binary files /dev/null and b/docs/assets/svls-add-ssh-key.png differ diff --git a/docs/assets/svls-api-rules.svg b/docs/assets/svls-api-rules.svg new file mode 100644 index 00000000..db9f53a1 --- /dev/null +++ b/docs/assets/svls-api-rules.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: APIRule flow in a Kyma cluster; (1) the user creates an APIRule CR, (2) the API Gateway Controller listens for new CRs, (3) the controller creates a Virtual Service.]
\ No newline at end of file diff --git a/docs/assets/svls-architecture.svg b/docs/assets/svls-architecture.svg new file mode 100644 index 00000000..fde10d04 --- /dev/null +++ b/docs/assets/svls-architecture.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: Serverless architecture in a Kyma cluster; the user creates a Function CR, which passes through the defaulting and validation webhooks; the Function Controller watches it and creates a ConfigMap and a K8s Job; the Job builds the Docker image and pushes it to the Docker registry; the controller then creates and watches a Deployment referencing that image, and creates a Service exposing it and a HorizontalPodAutoscaler scaling it (steps 1-10; dashed connectors and borders represent background processes).]
\ No newline at end of file diff --git a/docs/assets/svls-built.svg b/docs/assets/svls-built.svg new file mode 100644 index 00000000..b7cabd5a --- /dev/null +++ b/docs/assets/svls-built.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: Built-condition flow in a Kyma cluster; when a user creates or updates a Function, the controller checks whether an image rebuild is required; if not, the condition status is unchanged; if yes, it deletes all previous Job CRs labeled with the Function's UID, creates a Job CR (Built condition set to Unknown while processing), creates the Function's image, and pushes it to the Docker registry; on success the Built condition is set to True, on failure to False (dashed connectors and borders point to Function status conditions).]
\ No newline at end of file diff --git a/docs/assets/svls-configured.svg b/docs/assets/svls-configured.svg new file mode 100644 index 00000000..b236e712 --- /dev/null +++ b/docs/assets/svls-configured.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: Configured-condition flow in a Kyma cluster; when a user creates or updates a Function, the controller checks whether an image rebuild is required; if not, the condition status is unchanged; if yes, it creates or updates the ConfigMap; on success the Configured condition is set to True, on failure to False (dashed connectors and borders point to Function status conditions).]
\ No newline at end of file diff --git a/docs/assets/svls-create-ssh-key.png b/docs/assets/svls-create-ssh-key.png new file mode 100644 index 00000000..e77b8011 Binary files /dev/null and b/docs/assets/svls-create-ssh-key.png differ diff --git a/docs/assets/svls-function-ui.png b/docs/assets/svls-function-ui.png new file mode 100644 index 00000000..a4844659 Binary files /dev/null and b/docs/assets/svls-function-ui.png differ diff --git a/docs/assets/svls-internal-registry.svg b/docs/assets/svls-internal-registry.svg new file mode 100644 index 00000000..4e053c08 --- /dev/null +++ b/docs/assets/svls-internal-registry.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: "Internal Image Registry - communication schema"; inside the K8s network, a function build job resolves the internal image registry through the k8s dns and reaches it via the node port exposed on Nodes 1-3, each running a kubelet (steps 1-5).]
\ No newline at end of file diff --git a/docs/assets/svls-kyma-cli-functions.png b/docs/assets/svls-kyma-cli-functions.png new file mode 100644 index 00000000..7ab9acfb Binary files /dev/null and b/docs/assets/svls-kyma-cli-functions.png differ diff --git a/docs/assets/svls-running.svg b/docs/assets/svls-running.svg new file mode 100644 index 00000000..33f88631 --- /dev/null +++ b/docs/assets/svls-running.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text: Running-condition flow in a Kyma cluster; when a user creates or updates a Function, the controller checks whether a Deployment, Service, or HorizontalPodAutoscaler update is required; if not, the condition status is unchanged; if yes, it creates or updates those resources (Running condition set to Unknown while processing); on success the Running condition is set to True, on failure to False; an unhealthy Deployment sets Running to Unknown with reason MinimumReplicasUnavailable (dashed connectors and borders point to Function status conditions).]
\ No newline at end of file diff --git a/docs/assets/svls-settings.png b/docs/assets/svls-settings.png new file mode 100644 index 00000000..6e46c4ec Binary files /dev/null and b/docs/assets/svls-settings.png differ diff --git a/docs/contributor/03-10-scripts-not-working.md b/docs/contributor/03-10-scripts-not-working.md new file mode 100644 index 00000000..568eeaa2 --- /dev/null +++ b/docs/contributor/03-10-scripts-not-working.md @@ -0,0 +1,19 @@ +# Scripts Don't Work + +## Symptom + +For MacBook M1, some parts of the scripts may not work. + +The example error may look like this: `Error: unsupported platform OS_TYPE: Darwin, OS_ARCH: arm64; to mitigate this problem set variable KYMA with the absolute path to kyma-cli binary compatible with your operating system and architecture. Stop.` + +## Cause + +Kyma CLI is not released for Apple Silicon users. + +## Remedy + +Install [Kyma CLI manually](https://github.com/kyma-project/cli#installation) and export the path to it. + + ```bash + export KYMA=$(which kyma) + ``` diff --git a/docs/contributor/04-10-testing-strategy.md b/docs/contributor/04-10-testing-strategy.md new file mode 100644 index 00000000..d6ac9cc9 --- /dev/null +++ b/docs/contributor/04-10-testing-strategy.md @@ -0,0 +1,23 @@ +# Testing Strategy + +## CI/CD Jobs Running on Pull Requests + +Each pull request to the repository triggers the following CI/CD jobs that verify the Serverless Operator reconciliation logic and run integration tests of the Serverless module: + +- `Markdown / link-check (pull_request)` - Checks if there are no broken links in the pull request `.md` files. For the configuration, see the [mlc.config.json](https://github.com/kyma-project/serverless/blob/main/.mlc.config.json) and the [markdown.yaml](https://github.com/kyma-project/serverless/blob/e36239e8315d7cce49eaa3ad1766f3261cef8af6/.github/workflows/markdown.yaml#L8) files. +- `Operator verify / lint (pull_request)` - Is responsible for the Operator linting and static code analysis. For the configuration, see the [operator-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/operator-verify.yaml#L19) file. +- `Serverless verify / lint (pull_request)` - Is responsible for the Serverless linting and static code analysis. For the configuration, see the [serverless-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/serverless-verify.yaml#L19) file. +- `Operator verify / unit-test (pull_request)` - Runs basic unit tests of Operator's logic. For the configuration, see the [operator-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/operator-verify.yaml#L30) file. +- `Serverless verify / unit-test (pull_request)` - Runs unit tests of Serverless's logic. For the configuration, see the [serverless-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/serverless-verify.yaml#L31) file. +- `Serverless verify / integration-test (pull_request)` - Runs the basic functionality integration and the `tracing`, `api-gateway`, `cloud-event` contract compatibility integration test suite for the Serverless in a k3d cluster. For the configuration, see the [serverless-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/serverless-verify.yaml#L40) file. 
## CI/CD Jobs Running on the Main Branch

- `Serverless verify / integration-test (push)` - Runs the basic functionality integration and the `tracing`, `api-gateway`, and `cloud-event` contract compatibility integration test suite for Serverless in a k3d cluster. For the configuration, see the [serverless-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/serverless-verify.yaml#L40) file.
- `Operator verify / upgrade-test (push)` - Runs the upgrade integration test suite and verifies if the latest release can be successfully upgraded to the new (`main`) revision. For the configuration, see the [operator-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/operator-verify.yaml#L40) file.
- `Serverless verify / git-auth-integration-test (push)` - Runs the `GitHub` and `Azure DevOps` API and authentication integration test suite for Serverless. For the configuration, see the [serverless-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/serverless-verify.yaml#L67) file.
- `Operator verify / gardener-integration-test (push)` - Checks the installation of the Serverless module in the Gardener shoot cluster and runs basic integration tests of Serverless. For the configuration, see the [operator-verify.yaml](https://github.com/kyma-project/serverless/blob/main/.github/workflows/operator-verify.yaml#L60) file.

## CI/CD Jobs Running on a Schedule

- `Markdown / link-check` - Runs the Markdown link check every day at 05:00 AM. diff --git a/docs/contributor/README.md b/docs/contributor/README.md deleted file mode 100644 index 9a22d6f9..00000000 --- a/docs/contributor/README.md +++ /dev/null @@ -1 +0,0 @@ -In this folder, you can add any developer-related documentation, for example, advanced installation options, testing strategy, governance, etc. \ No newline at end of file diff --git a/docs/user/00-10-from-code-to-function.md b/docs/user/00-10-from-code-to-function.md new file mode 100644 index 00000000..33f8b5ec --- /dev/null +++ b/docs/user/00-10-from-code-to-function.md @@ -0,0 +1,17 @@ +# From Code to Function

Pick the programming language for the Function and decide where you want to keep the source code. Serverless creates the workload out of it for you.

## Runtimes

Functions support multiple languages by using the underlying execution environments known as runtimes. Currently, you can create both Node.js and Python Functions in Kyma.

> [!TIP]
> See [sample Functions](technical-reference/07-10-sample-functions.md) for each available runtime.

## Source Code

You can also choose where you want to keep your Function's source code and dependencies. You can either place them directly in the Function CR under the **spec.source** and **spec.deps** fields as an **inline Function**, or store the code and dependencies in a public or private Git repository (**Git Functions**). Choosing the second option ensures that your Function is versioned and gives you more development freedom in the choice of a project structure or an IDE.

> [!TIP]
> Read more about [Git Functions](technical-reference/07-40-git-source-type.md).
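For orientation, a minimal inline Function might look like the following sketch. It assumes the `v1alpha2` Function API, where the inline source and dependencies sit under **spec.source.inline**, and an example runtime name; check the API version and runtimes available in your cluster:

```yaml
apiVersion: serverless.kyma-project.io/v1alpha2
kind: Function
metadata:
  name: hello-world
  namespace: default
spec:
  runtime: nodejs18        # example runtime name
  source:
    inline:
      # user-provided handler; the runtime calls the exported main function
      source: |
        module.exports = {
          main: function (event, context) {
            return "Hello from an inline Function!";
          }
        }
      dependencies: |
        { "name": "hello-world", "version": "1.0.0", "dependencies": {} }
```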
diff --git a/docs/user/00-20-configure-serverless.md b/docs/user/00-20-configure-serverless.md new file mode 100644 index 00000000..c047046b --- /dev/null +++ b/docs/user/00-20-configure-serverless.md @@ -0,0 +1,222 @@ +# Serverless Configuration

## Overview

The Serverless module has its own operator (Serverless operator). It watches the Serverless custom resource (CR) and reconfigures (reconciles) the Serverless workloads.

The Serverless CR becomes an API to configure the Serverless module. You can use it to:

- enable or disable the internal Docker registry
- configure the external Docker registry
- override the endpoint for traces collected by the Serverless Functions
- override the endpoint for eventing
- override the target CPU utilization percentage
- override the Function requeue duration
- override the Function build executor arguments
- override the maximum number of simultaneous Function builds
- override the healthz liveness timeout
- override the Function request body limit
- override the Function timeout
- override the default build Job preset
- override the default runtime Pod preset

The default configuration of the Serverless module is as follows:

  ```yaml
  apiVersion: operator.kyma-project.io/v1alpha1
  kind: Serverless
  metadata:
    name: serverless-sample
  spec:
    dockerRegistry:
      enableInternal: true
  ```

## Configure Docker Registry

By default, Serverless uses PersistentVolume (PV) as the internal registry to store Docker images for Functions. The default storage size of a single volume is 20 GB. This internal registry is suitable for local development.

If you use Serverless for production purposes, it is recommended that you use an external registry, such as Docker Hub, Artifact Registry, or Azure Container Registry (ACR).

Follow these steps to use the external Docker registry in Serverless:

1. Create a Secret in the `kyma-system` namespace with the required data (`username`, `password`, `serverAddress`, and `registryAddress`):

   ```bash
   kubectl create secret generic my-registry-config \
       --namespace kyma-system \
       --from-literal=username={USERNAME} \
       --from-literal=password={PASSWORD} \
       --from-literal=serverAddress={SERVER_URL} \
       --from-literal=registryAddress={REGISTRY_URL}
   ```

> [!TIP]
> In the case of Docker Hub, the Docker registry address is usually the same as the account name.

Examples:

### **Docker Hub**

   ```bash
   kubectl create secret generic my-registry-config \
       --namespace kyma-system \
       --from-literal=username={USERNAME} \
       --from-literal=password={PASSWORD} \
       --from-literal=serverAddress=https://index.docker.io/v1/ \
       --from-literal=registryAddress={USERNAME}
   ```

### **Artifact Registry**

To learn how to set up authentication for Docker with Artifact Registry, visit the [Artifact Registry documentation](https://cloud.google.com/artifact-registry/docs/docker/authentication#json-key).

   ```bash
   kubectl create secret generic my-registry-config \
       --namespace kyma-system \
       --from-literal=username=_json_key \
       --from-literal=password={GCR_KEY_JSON} \
       --from-literal=serverAddress=gcr.io \
       --from-literal=registryAddress=gcr.io/{YOUR_GCR_PROJECT}
   ```

### **ACR**

To learn how to authenticate with ACR, visit the [ACR documentation](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-authentication?tabs=azure-cli#az-acr-login-with---expose-token).

   ```bash
   kubectl create secret generic my-registry-config \
       --namespace kyma-system \
       --from-literal=username=00000000-0000-0000-0000-000000000000 \
       --from-literal=password={ACR_TOKEN} \
       --from-literal=serverAddress={AZ_REGISTRY_NAME}.azurecr.io \
       --from-literal=registryAddress={AZ_REGISTRY_NAME}.azurecr.io
   ```
2. Reference the Secret in the Serverless CR:

   ```yaml
   spec:
     dockerRegistry:
       secretName: my-registry-config
   ```

The URL of the currently used Docker registry is visible in the Serverless CR status.
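To verify the configuration, you can read that status field directly. A quick check, assuming the CR is named `serverless-sample` as in the default configuration above (adjust the name and namespace to your installation):

```bash
# Prints the registry URL currently used by Serverless
kubectl get serverless serverless-sample -n kyma-system \
  -o jsonpath='{.status.dockerRegistry}'
```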
## Configure Trace Endpoint

By default, the Serverless operator checks if there is a trace endpoint available. If available, the detected trace endpoint is used as the trace collector URL in Functions. If no trace endpoint is detected, Functions are configured with no trace collector endpoint. You can configure a custom trace endpoint so that Function traces are sent to any tracing backend you choose. The currently used trace endpoint is visible in the Serverless CR status.

  ```yaml
  spec:
    tracing:
      endpoint: http://jaeger-collector.observability.svc.cluster.local:4318/v1/traces
  ```

## Configure Eventing Endpoint

You can configure a custom eventing endpoint, so when you use the SDK for sending events from your Functions, it is used to publish events. The currently used eventing endpoint is visible in the Serverless CR status. By default, `http://eventing-publisher-proxy.kyma-system.svc.cluster.local/publish` is used.

  ```yaml
  spec:
    eventing:
      endpoint: http://eventing-publisher-proxy.kyma-system.svc.cluster.local/publish
  ```

## Configure Target CPU Utilization Percentage

You can set a custom target threshold for CPU utilization. The default value is set to `50%`.

```yaml
  spec:
    targetCPUUtilizationPercentage: 50
```

## Configure the Function Requeue Duration

By default, the Function associated with the default configuration is requeued every 5 minutes.

```yaml
  spec:
    functionRequeueDuration: 5m
```

## Configure the Function Build Executor Arguments

Use this field to choose the [arguments](https://github.com/GoogleContainerTools/kaniko?tab=readme-ov-file#additional-flags) passed to the Function build executor, for example:

- `--insecure` - executor operates in an insecure mode
- `--skip-tls-verify` - executor skips the TLS certificate verification
- `--skip-unused-stages` - executor skips any stages that aren't used for the current execution
- `--log-format=text` - executor uses logs in a given format
- `--cache=true` - enables caching for the executor
- `--compressed-caching=false` - prevents tar compression for cached layers; this increases the build time but decreases memory usage, especially for large builds
- `--use-new-run` - improves performance by avoiding full filesystem snapshots

```yaml
  spec:
    functionBuildExecutorArgs: "--insecure,--skip-tls-verify,--skip-unused-stages,--log-format=text,--cache=true,--use-new-run,--compressed-caching=false"
```

## Configure the Function Build Max Simultaneous Jobs

You can set a custom maximum number of simultaneous Function build jobs. The default value is set to `5`.

```yaml
  spec:
    functionBuildMaxSimultaneousJobs: 5
```

## Configure the healthz Liveness Timeout

By default, the Function is considered unhealthy if the liveness health check endpoint does not respond within 10 seconds.

```yaml
  spec:
    healthzLivenessTimeout: "10s"
```

## Configure the Function Request Body Limit

Use this field to configure the maximum size limit for the request body of a Function. The default value is set to `1` megabyte.

```yaml
  spec:
    functionRequestBodyLimitMb: 1
```

## Configure the Function Timeout

By default, the maximum execution time limit for a Function is set to `180` seconds.

```yaml
  spec:
    functionTimeoutSec: 180
```

## Configure the Default Build Job Preset

You can configure the default build Job preset to be used.

```yaml
  spec:
    defaultBuildJobPreset: "normal"
```

## Configure the Default Runtime Pod Preset

You can configure the default runtime Pod preset to be used.

```yaml
  spec:
    defaultRuntimePodPreset: "M"
```
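Several of these settings can be combined in a single Serverless CR. The following sketch is illustrative only; every field is optional, and the values are examples taken from the sections above:

```yaml
apiVersion: operator.kyma-project.io/v1alpha1
kind: Serverless
metadata:
  name: serverless-sample
spec:
  dockerRegistry:
    enableInternal: false
    secretName: my-registry-config    # external registry Secret from the steps above
  tracing:
    endpoint: http://jaeger-collector.observability.svc.cluster.local:4318/v1/traces
  targetCPUUtilizationPercentage: 50
  functionRequeueDuration: 5m
  functionBuildMaxSimultaneousJobs: 5
  defaultRuntimePodPreset: "M"
```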
diff --git a/docs/user/00-30-development-toolkit.md b/docs/user/00-30-development-toolkit.md new file mode 100644 index 00000000..e5e105a3 --- /dev/null +++ b/docs/user/00-30-development-toolkit.md @@ -0,0 +1,13 @@ +# Development Toolkit

To start developing your first Functions, you need:

- Self-hosted **Kubernetes cluster** and the **KUBECONFIG** file to authenticate to the cluster
- **Kyma** as the platform for managing the Function-related workloads
- [**Docker**](https://www.docker.com/) as the container runtime
- [**kubectl**](https://kubernetes.io/docs/reference/kubectl/kubectl/), the Kubernetes command-line tool, for running commands against clusters
- Development environment of your choice:
  - **Kyma CLI** to easily initiate inline Functions or Git Functions locally, run, test, and later apply them in the clusters
  - **Node.js** or **Python**
  - **IDE** as the source code editor
  - **Kyma dashboard** to manage Functions and related workloads through the graphical user interface diff --git a/docs/user/00-40-security-considerations.md b/docs/user/00-40-security-considerations.md new file mode 100644 index 00000000..2a0023f0 --- /dev/null +++ b/docs/user/00-40-security-considerations.md @@ -0,0 +1,20 @@ +# Security Considerations

To eliminate potential security risks when using Functions, bear in mind these few facts:

- Kyma provides base images for serverless runtimes. Those default runtimes are maintained with regard to commonly known security advisories. It is possible to use a custom runtime image (see this [tutorial](tutorials/01-110-override-runtime-image.md)). In such a case, you are responsible for security compliance and assessment of exploitability of any potential vulnerabilities of the custom runtime image.

- Kyma does not run any security scans against Functions and their images. Before you store any sensitive data in Functions, consider the potential risk of data leakage.

- Kyma does not define any authorization policies that would restrict Functions' access to other resources within the namespace. If you deploy a Function in a given namespace, it can freely access all events and APIs of services within this namespace.

- Since Kubernetes is [moving from PodSecurityPolicies to PodSecurity Admission Controller](https://kubernetes.io/docs/tasks/configure-pod-container/migrate-from-psp/), Kyma Functions require running in namespaces with the `baseline` Pod security level. The `restricted` level is not currently supported due to the requirements of the Function building process.

- Kyma Serverless components can run with the PodSecurity Admission Controller support in the `restricted` Pod security level when using an external registry. When the Internal Docker Registry is enabled, the Internal Registry DaemonSet requires elevated privileges to function correctly, exceeding the limitations of both the `restricted` and `baseline` levels.

- All administrators and regular users who have access to a specific namespace in a cluster can also access:
  - Source code of all Functions within this namespace
  - Internal Docker registry that contains Function images
  - Secrets allowing the build Job to pull and push images from and to the Docker registry (in non-system namespaces) diff --git a/docs/user/00-50-limitations.md b/docs/user/00-50-limitations.md new file mode 100644 index 00000000..907d52af --- /dev/null +++ b/docs/user/00-50-limitations.md @@ -0,0 +1,86 @@ +# Serverless Limitations

## Controller Limitations

The Serverless controller does not serve time-critical requests from users. It reconciles Function custom resources (CRs), stored in the Kubernetes API server, and has no persistent state of its own.

The Serverless controller doesn't build or serve Functions using its allocated runtime resources. It delegates this work to dedicated Kubernetes workloads. It schedules (build-time) jobs to build the Function Docker image and (runtime) Pods to serve the Functions once they are built. Refer to the [architecture](technical-reference/04-10-architecture.md) diagram for more details.

With this in mind, the Serverless controller does not require horizontal scaling. It scales vertically up to `160Mi` of memory and `500m` of CPU time.

## Limitation for the Number of Functions

There is no upper limit on the number of Functions that can run on Kyma (as with Kubernetes workloads in general). Once a user defines a Function, its build jobs and runtime Pods are always requested by the Serverless controller. It's up to Kubernetes to schedule them based on the available memory and CPU time on the Kubernetes worker nodes. This is determined mainly by the number of Kubernetes worker nodes (and the Node auto-scaling capabilities) and their computational capacity.

## Build Phase Limitation

The time necessary to build a Function depends on the following:

- selected [build profile](technical-reference/07-80-available-presets.md#build-jobs-resources) that determines the requested resources (and their limits) for the build phase
- number and size of dependencies that must be downloaded and bundled into the Function image
- cluster Nodes specification (see the note with the reference specification at the end of this document)

#### **Node.js**

| | local-dev | no profile (no limits for resources) |
|-----------------|-----------|--------------------------------------|
| no dependencies | 24 sec | 15 sec |
| 2 dependencies | 26 sec | 16 sec |

#### **Python**

| | local-dev | no profile (no limits for resources) |
|-----------------|-----------|--------------------------------------|
| no dependencies | 30 sec | 16 sec |
| 2 dependencies | 32 sec | 20 sec |

The shortest build time (the limit) is approximately 15 seconds and requires no limitation of the build job resources and a minimum number of dependencies that are pulled in during the build phase.

Running multiple Function build jobs at once (especially with no limits) may drain the cluster resources. To mitigate such a risk, there is an additional limit of 5 simultaneous Function builds. If a sixth one is scheduled, it is built once there is a vacancy in the build queue.

## Runtime Phase Limitations

At runtime, the Functions serve user-provided logic wrapped in a web framework (`express` for Node.js and `bottle` for Python).
Setting the user logic aside, those frameworks have their own limitations that depend on the selected [runtime profile](technical-reference/07-80-available-presets.md#functions-resources) and the Kubernetes nodes specification (see the note with the reference specification at the end of this document).

The following tables describe the response times of the selected runtime profiles for a "Hello World" Function requested at 50 requests/second. This describes the overhead of the serving framework itself. Any user logic added on top of that will add extra milliseconds and must be profiled separately.

#### **Node.js**

| | XL | L | M | S | XS |
|---------------------------------|--------|--------|--------|--------|--------|
| response time [average] | ~13ms | 13ms | ~15ms | ~60ms | ~400ms |
| response time [95th percentile] | ~20ms | ~30ms | ~70ms | ~200ms | ~800ms |
| response time [99th percentile] | ~200ms | ~200ms | ~220ms | ~500ms | ~1.25s |

#### **Python**

| | XL | L | M | S |
|---------------------------------|--------|--------|--------|--------|
| response time [average] | ~11ms | 12ms | ~12ms | ~14ms |
| response time [95th percentile] | ~25ms | ~25ms | ~25ms | ~25ms |
| response time [99th percentile] | ~175ms | ~180ms | ~210ms | ~280ms |

Obviously, the bigger the runtime profile, the more resources are available to serve the response more quickly. Consider these limits of the serving layer as a baseline, as they do not take your Function logic into account.

### Scaling

Function runtime Pods can be scaled horizontally from zero up to the limits of the available resources at the Kubernetes worker nodes. See the [Use external scalers](tutorials/01-130-use-external-scalers.md) tutorial for more information.

## In-Cluster Docker Registry

Serverless comes with an in-cluster Docker registry for the Function images. For more information on the Docker registry configuration, visit [Serverless configuration](00-20-configure-serverless.md#configure-docker-registry).

> [!NOTE]
> All measurements were done on Kubernetes with five AWS worker nodes of type `m5.xlarge` (four CPU 3.1 GHz x86_64 cores, 16 GiB memory). diff --git a/docs/user/08-10-best-practices.md b/docs/user/08-10-best-practices.md new file mode 100644 index 00000000..83c725ee --- /dev/null +++ b/docs/user/08-10-best-practices.md @@ -0,0 +1,62 @@ +# Serverless Best Practices

## Overview - It's All About Custom Resources

Kyma Serverless introduces a [Function](resources/06-10-function-cr.md) CustomResourceDefinition (CRD) as an extension to the Kubernetes API server. Defining a Function in Kyma essentially means creating a new instance of the Function custom resource (CR). However, the content of the Function CR specification may become quite long. It consists of the code (or a Git reference to the code), dependencies, runtime specification, build-time specification, etc. Additionally, there are other CRs that are relevant for a Function developer - that is, [APIRule](https://kyma-project.io/docs/kyma/latest/05-technical-reference/00-custom-resources/apix-01-apirule/) (defining how the Function is exposed to the outside world), [Subscription](https://kyma-project.io/docs/kyma/latest/05-technical-reference/00-custom-resources/evnt-01-subscription/) (defining which CloudEvents should trigger a Function), and others.

All of that can be easily managed using the following best practices for Function development. You will find recommendations that are helpful at any stage of your development journey.
## Use UI to Explore

At the beginning of your Kyma journey, you will probably want to evaluate Serverless and draft a few Functions. Kyma dashboard is perfect for gaining basic experience and starting the journey with Kyma Functions. Its dedicated Serverless features help you draft your first Functions by putting the code directly in the browser using a web IDE. Kyma dashboard also helps you expose your Function using HTTP, define environment variables, subscribe to CloudEvents, bind ServiceInstances, and even shows you the Function logs - all in one place.

Get started with the [Function UI](tutorials/01-10-create-inline-function.md).
![function-ui](../assets/svls-function-ui.png)

## Use Kyma CLI for Better Development Experience

Defining your Function from the Kyma dashboard is very quick and easy, but it might not be enough to satisfy your needs as a developer. To code and test more complex cases, you may want to write your Function in your favorite IDE or run and debug the Function on your local machine before actually deploying it in the Kyma runtime. Also, you might want to avoid recreating the same Functions manually from the UI in a different environment. In the end, having deployable artifacts is more desirable. This is where Kyma CLI comes in handy, as it enables you to keep your Function's code and configuration in the form of a workspace.

Initialize a scaffold for a brand new Function using the `kyma init function` command, or fetch the current state of an existing Function deployed in your Kyma runtime using `kyma sync function`. Focus on the Function code and develop it from your favorite IDE. Configure your Functions directly in the [`config.yaml` manifest file](technical-reference/07-60-function-configuration-file.md).

> [!TIP]
> Use `kyma init function --vscode` to generate a `.json` schema, which can be used in VSCode for autocompletion.

Kyma CLI helps you run your code locally with a single `kyma run function` command. You can run your Function using your local Docker daemon with the same runtime Docker context, as if it were run in the Kyma runtime.

> [!TIP]
> Use `kyma run function` with `--hot-deploy` and spare yourself unnecessary restarts of the Functions whenever you test changed Function logic. Also, use the [`--debug` option](tutorials/01-40-debug-function.md) to allow connecting with your favorite debugger.

![kyma-cli-functions](../assets/svls-kyma-cli-functions.png)

Having written and tested your Function locally, simply deploy it to the Kyma runtime with the `kyma apply function` command, used in the folder of your Function's workspace. The command reads the files, translates them to the Kubernetes manifests, and deploys the Function.
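A typical local development loop with these commands might look as follows. This is a sketch assuming current Kyma CLI flag names and an example runtime identifier; check `kyma <command> --help` for the options available in your CLI version:

```bash
# Scaffold a new Function workspace in the current folder
kyma init function --name my-function --runtime nodejs18

# Run the Function on the local Docker daemon, redeploying on code changes
kyma run function --hot-deploy

# Deploy the workspace to the cluster from the current kubeconfig context
kyma apply function
```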
+
+Use the `--dry-run` option of the `kyma apply function` command to generate Kubernetes manifests that include the Function CR itself and all the related CRs (for example, APIRules, Subscriptions, etc.).
+
+ ```bash
+ kyma apply function --dry-run --ci -o yaml > my-function.yaml
+ ```
+
+The generated manifest should be a part of all the manifests that define your application and are pushed to the Git repository.
+Deploy everything in a consistent way, either using CI/CD or GitOps operators (for example, `fluxcd` or `argocd`) installed on your Kyma runtime.
+
+> [!NOTE]
+> Kyma Functions come in two types: `git` and `inline`. For the [Git type](tutorials/01-11-create-git-function.md), you configure a Git repository as a source of your Function code instead of creating it `inline`.
Thus, you can skip rendering the Kubernetes manifests and deploying them each time you make a change to the Function code or dependencies. Simply push the changes to the referenced Git repository, and the Serverless controller rebuilds the Function deployed in your Kyma runtime.
+
+Have a look at this [example](https://github.com/kyma-project/serverless/tree/main/examples/incluster_eventing) that illustrates how you can set up your Git project. Note the `k8s resources` folder with the YAML manifests to be pushed to the Kubernetes API server (for example, using kubectl in your CI/CD or GitOps setup) and the `src` folder containing the Functions' source code. The sources are pulled directly by Kyma Serverless to build new Function images whenever the source content changes in the Git repository.
+
+Browse the [tutorials](tutorials/README.md) for Serverless to learn how to use it step-by-step in different scenarios.
diff --git a/docs/user/README.md b/docs/user/README.md index 1c6854d6..4b1374c2 100644 --- a/docs/user/README.md +++ b/docs/user/README.md @@ -1,14 +1,43 @@
-> **TIP:** Apart from the {Module Name} heading, you can use your own titles for the remaining sections. You can also add more module-specific sections.
+# Serverless Module
-# {Module Name}
-> Modify the title and insert the name of your module. Use Heading 1 (H1).
+## What is serverless?
-## Overview
-> Provide a description of your module and its components. Describe its features and functionalities. Mention the scope and add information on the CustomResourceDefinitions (CRDs).
-> You can divide this section to the relevant subsections.
+"Serverless" refers to an architecture in which the infrastructure of your applications is managed by cloud providers. Contrary to its name, a serverless application does require a server, but it doesn't require you to run and manage it on your own. Instead, you subscribe to a given cloud provider, such as AWS, Azure, or GCP, and pay a subscription fee only for the resources you actually use. Because the resource allocation can be dynamic and depends on your current needs, the serverless model is particularly cost-effective when you want to implement a certain logic that is triggered on demand. Simply put, you get things done and don't pay for the infrastructure that stays idle.
-## Useful Links (Optional)
-> Provide links to the most relevant module documentation (tutorials, technical references, resources, etc.).
+The Kyma Serverless module offers a Kubernetes-based platform on which you can build, run, and manage serverless applications in Kubernetes. These applications are called **Functions**, and they are based on the [Function custom resource (CR)](resources/06-10-function-cr.md) objects.
They contain simple code snippets that implement specific business logic. For example, you can define that you want to use a Function as a proxy that saves all incoming event details to an external database.
-## Feedback (Optional)
-> Describe how users can provide feedback. \ No newline at end of file
+Such a Function can be:
+
+- Triggered by other workloads in the cluster (in-cluster events) or business events coming from external sources. You can subscribe to them using a [Subscription CR](https://kyma-project.io/#/eventing-manager/user/resources/evnt-cr-subscription).
+- Exposed to an external endpoint (HTTPS). With an [APIRule CR](https://kyma-project.io/#/api-gateway/user/custom-resources/apirule/04-10-apirule-custom-resource), you can define who can reach the endpoint and what operations they can perform on it.
+
+## What is Serverless in Kyma?
+
+Serverless in Kyma is an area that:
+
+- Ensures quick deployments following a Function approach
+- Enables scaling independent of the core applications
+- Gives a possibility to revert changes without causing production system downtime
+- Supports the complete asynchronous programming model
+- Offers loose coupling of Event providers and consumers
+- Enables flexible application scalability and availability
+
+Serverless in Kyma allows you to reduce the implementation and operation effort of an application to the absolute minimum. It provides a platform to run lightweight Functions in a cost-efficient and scalable way using JavaScript and Node.js. Serverless in Kyma relies on Kubernetes resources like [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [Services](https://kubernetes.io/docs/concepts/services-networking/service/), and [HorizontalPodAutoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) for deploying and managing Functions, and [Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) for creating Docker images.
+
+## Serverless Module
+
+The Serverless module allows you to install, uninstall, and configure Kyma's Serverless on your Kubernetes cluster using Serverless Operator.
+
+## Serverless Operator
+
+When you enable the Serverless module, Serverless Operator takes care of the installation and configuration of Serverless on your cluster. It manages the Serverless lifecycle based on the dedicated Serverless custom resource (CR).
+
+## Useful Links
+
+To perform both simple and more advanced tasks, check the [Serverless tutorials](tutorials/README.md).
+
+To troubleshoot Serverless-related issues, see the [troubleshooting guides](troubleshooting-guides/README.md).
+
+To analyze the Function specification and configuration files, and to understand the technicalities behind the Serverless implementation, visit the [technical reference](technical-reference/README.md).
+
+For more information on the Serverless custom resources, see [Resources](resources/README.md).
diff --git a/docs/user/_sidebar.md b/docs/user/_sidebar.md index 467416ed..a0a1ec87 100644 --- a/docs/user/_sidebar.md +++ b/docs/user/_sidebar.md @@ -1 +1,41 @@
-Use this file to create an unordered list of documents you want to display on the [Kyma website](https://kyma-project.io). The list serves to navigate through the user documentation. For more information, visit the [User documentation](https://github.com/kyma-project/community/blob/main/docs/guidelines/content-guidelines/01-user-docs.md) guide.
\ No newline at end of file + +* [Back to Kyma Home](/) +* [Serverless Module](/serverless-manager/user/README.md) +* [From Code to Function](/serverless-manager/user/00-10-from-code-to-function.md) +* [Serverless Configuration](/serverless-manager/user/00-20-configure-serverless.md) +* [Development Toolkit](/serverless-manager/user/00-30-development-toolkit.md) +* [Security Considerations](/serverless-manager/user/00-40-security-considerations.md) +* [Limitations](/serverless-manager/user/00-50-limitations.md) +* [Tutorials](/serverless-manager/user/tutorials/README.md) + * [Create an Inline Function](/serverless-manager/user/tutorials/01-10-create-inline-function.md) + * [Create a Git Function](/serverless-manager/user/tutorials/01-11-create-git-function.md) + * [Expose the Function](/serverless-manager/user/tutorials/01-20-expose-function.md) + * [Manage Functions Through Kyma CLI](/serverless-manager/user/tutorials/01-30-manage-functions-with-kyma-cli.md) + * [Debug a Function](/serverless-manager/user/tutorials/01-40-debug-function.md) + * [Log Into a Private Package Registry](/serverless-manager/user/tutorials/01-80-log-into-private-packages-registry.md) + * [Set Asynchronous Communication Between Functions](/serverless-manager/user/tutorials/01-90-set-asynchronous-connection.md) + * [Customize Function Traces](/serverless-manager/user/tutorials/01-100-customize-function-traces.md) + * [Override Runtime Image](/serverless-manager/user/tutorials/01-110-override-runtime-image.md) + * [Inject Environment Variables](/serverless-manager/user/tutorials/01-120-inject-envs.md) + * [Use External Scalers](/serverless-manager/user/tutorials/01-130-use-external-scalers.md) + * [Access to Secrets Mounted as Volume](/serverless-manager/user/tutorials/01-140-use-secret-mounts.md) +* [Resources](/serverless-manager/user/resources/README.md) + * [Function CR](/serverless-manager/user/resources/06-10-function-cr.md) + * [Serverless CR](/serverless-manager/user/resources/06-20-serverless-cr.md) +* [Technical Reference](/serverless-manager/user/technical-reference/README.md) + * [Serverless Architecture](/serverless-manager/user/technical-reference/04-10-architecture.md) + * [Internal Docker Registry](/serverless-manager/user/technical-reference/04-20-internal-registry.md) + * [Environment Variables in Functions](/serverless-manager/user/technical-reference/05-20-env-variables.md) + * [Sample Functions](/serverless-manager/user/technical-reference/07-10-sample-functions.md) + * [Function Processing](/serverless-manager/user/technical-reference/07-20-function-processing-stages.md) + * [Git Source Type](/serverless-manager/user/technical-reference/07-40-git-source-type.md) + * [Function Configuration File](/serverless-manager/user/technical-reference/07-60-function-configuration-file.md) + * [Function's Specification](/serverless-manager/user/technical-reference/07-70-function-specification.md) + * [Available Presets](/serverless-manager/user/technical-reference/07-80-available-presets.md) +* [Troubleshooting Guides](/serverless-manager/user/troubleshooting-guides/README.md) + * [Functions Won't Build](/serverless-manager/user/troubleshooting-guides/03-10-cannot-build-functions.md) + * [Container Fails](/serverless-manager/user/troubleshooting-guides/03-20-failing-function-container.md) + * [Functions Failing To Build on k3d](/serverless-manager/user/troubleshooting-guides/03-40-function-build-failing-k3d.md) + * [Serverless Periodically 
Restarting](/serverless-manager/user/troubleshooting-guides/03-50-serverless-periodically-restaring.md)
+* [Best Practices](/serverless-manager/user/08-10-best-practices.md)
+ \ No newline at end of file
diff --git a/docs/user/resources/06-10-function-cr.md b/docs/user/resources/06-10-function-cr.md new file mode 100644 index 00000000..784d91a1 --- /dev/null +++ b/docs/user/resources/06-10-function-cr.md @@ -0,0 +1,210 @@
+# Function
+
+The `functions.serverless.kyma-project.io` CustomResourceDefinition (CRD) is a detailed description of the kind of data and the format used to manage Functions within Kyma. To get the up-to-date CRD and show the output in the YAML format, run this command:
+
+```bash
+kubectl get crd functions.serverless.kyma-project.io -o yaml
+```
+
+## Sample Custom Resource
+
+The following Function object creates a Function that responds to HTTP requests with the "Hello John" message. The Function's code (**source**) and dependencies (**dependencies**) are specified in the Function CR.
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: my-test-function
+  namespace: default
+  labels:
+    app: my-test-function
+spec:
+  runtime: nodejs20
+  source:
+    inline:
+      dependencies: |
+        {
+          "name": "hellowithdeps",
+          "version": "0.0.1",
+          "dependencies": {
+            "end-of-stream": "^1.4.1",
+            "from2": "^2.3.0",
+            "lodash": "^4.17.5"
+          }
+        }
+      source: |
+        module.exports = {
+          main: function(event, context) {
+            const name = process.env.PERSON_NAME;
+            return 'Hello ' + name;
+          }
+        }
+  scaleConfig:
+    minReplicas: 3
+    maxReplicas: 3
+  resourceConfiguration:
+    function:
+      resources:
+        limits:
+          cpu: 1
+          memory: 1Gi
+        requests:
+          cpu: 500m
+          memory: 500Mi
+    build:
+      resources:
+        limits:
+          cpu: 2
+          memory: 2Gi
+        requests:
+          cpu: 1
+          memory: 1Gi
+  env:
+    - name: PERSON_NAME
+      value: "John"
+  secretMounts:
+    - secretName: SECRET_NAME
+      mountPath: /secret/mount/path
+status:
+  conditions:
+    - lastTransitionTime: "2020-04-14T08:17:11Z"
+      message: "Deployment my-test-function-nxjdp is ready"
+      reason: DeploymentReady
+      status: "True"
+      type: Running
+    - lastTransitionTime: "2020-04-14T08:16:55Z"
+      message: "Job my-test-function-build-552ft finished"
+      reason: JobFinished
+      status: "True"
+      type: BuildReady
+    - lastTransitionTime: "2020-04-14T08:16:16Z"
+      message: "ConfigMap my-test-function-xv6pc created"
+      reason: ConfigMapCreated
+      status: "True"
+      type: ConfigurationReady
+```
+
+If you store the Function's source code and dependencies in a Git repository and want the Function Controller to fetch them from it, use these parameters in the Function CR:
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: my-test-function
+spec:
+  source:
+    gitRepository:
+      url: https://github.com/username/repo.git
+      baseDir: "/"
+      reference: "branchA"
+      auth:
+        type: basic
+        secretName: secret-name
+  runtime: "nodejs20"
+```
+
+## Custom Resource Parameters
+
+### Function.serverless.kyma-project.io/v1alpha2
+
+**Spec:**
+
+| Parameter | Type | Description |
+| ---- | ----------- | ---- |
+| **annotations** | map\[string\]string | Defines annotations used in the Deployment's PodTemplate and applied on the Function's runtime Pod. |
| **env** | \[\]object | Specifies an array of key-value pairs to be used as environment variables for the Function. You can define values as static strings or reference values from ConfigMaps or Secrets. For configuration details, see the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/). |
+| **labels** | map\[string\]string | Defines labels used in the Deployment's PodTemplate and applied on the Function's runtime Pod. |
+| **replicas** | integer | Defines the exact number of the Function's Pods to run at a time. If **ScaleConfig** is configured, or if the Function is targeted by an external scaler, then the **Replicas** field is used by the relevant HorizontalPodAutoscaler to control the number of active replicas. |
+| **resourceConfiguration** | object | Specifies resources requested by the Function and the build Job. |
+| **resourceConfiguration.​build** | object | Specifies resources requested by the build Job's Pod. |
+| **resourceConfiguration.​build.​profile** | string | Defines the name of the predefined set of resource values. Can't be used together with **Resources**. |
+| **resourceConfiguration.​build.​resources** | object | Defines the amount of resources available for the Pod. Can't be used together with **Profile**. For configuration details, see the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). |
+| **resourceConfiguration.​function** | object | Specifies resources requested by the Function's Pod. |
+| **resourceConfiguration.​function.​profile** | string | Defines the name of the predefined set of resource values. Can't be used together with **Resources**. |
+| **resourceConfiguration.​function.​resources** | object | Defines the amount of resources available for the Pod. Can't be used together with **Profile**. For configuration details, see the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). |
+| **runtime** (required) | string | Specifies the runtime of the Function. The available values are `nodejs18` - deprecated, `nodejs20`, `python39` - deprecated, and `python312`. |
+| **runtimeImageOverride** | string | Specifies the runtime image used instead of the default one. |
+| **scaleConfig** | object | Defines the minimum and maximum number of the Function's Pods to run at a time. When it is configured, a HorizontalPodAutoscaler is deployed and controls the **Replicas** field to scale the Function based on the CPU utilization. |
+| **scaleConfig.​maxReplicas** (required) | integer | Defines the maximum number of the Function's Pods to run at a time. |
+| **scaleConfig.​minReplicas** (required) | integer | Defines the minimum number of the Function's Pods to run at a time. |
+| **secretMounts** | \[\]object | Specifies Secrets to mount into the Function's container filesystem. |
+| **secretMounts.​mountPath** (required) | string | Specifies the path within the container where the Secret should be mounted. |
+| **secretMounts.​secretName** (required) | string | Specifies the name of the Secret in the Function's namespace. |
+| **source** (required) | object | Contains the Function's source code configuration. |
+| **source.​gitRepository** | object | Defines the Function as Git-sourced. Can't be used together with **Inline**. |
+| **source.​gitRepository.​auth** | object | Specifies the authentication method. Required for SSH. |
| **source.​gitRepository.​auth.​secretName** (required) | string | Specifies the name of the Secret with credentials used by the Function Controller to authenticate to the Git repository in order to fetch the Function's source code and dependencies. This Secret must be stored in the same namespace as the Function CR. |
+| **source.​gitRepository.​auth.​type** (required) | string | Defines the repository authentication method. The value is either `basic` if you use a password or token, or `key` if you use an SSH key. |
+| **source.​gitRepository.​baseDir** | string | Specifies the relative path to the Git directory that contains the source code from which the Function is built. |
+| **source.​gitRepository.​reference** | string | Specifies either the branch name, tag, or commit revision from which the Function Controller automatically fetches the changes in the Function's code and dependencies. |
+| **source.​gitRepository.​url** (required) | string | Specifies the URL of the Git repository with the Function's code and dependencies. Depending on whether the repository is public or private and what authentication method is used to access it, the URL must start with the `http(s)`, `git`, or `ssh` prefix. |
+| **source.​inline** | object | Defines the Function as an inline Function. Can't be used together with **GitRepository**. |
+| **source.​inline.​dependencies** | string | Specifies the Function's dependencies. |
+| **source.​inline.​source** (required) | string | Specifies the Function's full source code. |
+
+**Status:**
+
+| Parameter | Type | Description |
+| ---- | ----------- | ---- |
+| **baseDir** | string | Specifies the relative path to the Git directory that contains the source code from which the Function is built. |
+| **commit** | string | Specifies the commit hash used to build the Function. |
+| **conditions** | \[\]object | Specifies an array of conditions describing the status of the Function. |
+| **conditions.​lastTransitionTime** | string | Specifies the last time the condition transitioned from one status to another. |
+| **conditions.​message** | string | Provides a human-readable message indicating details about the transition. |
+| **conditions.​reason** | string | Specifies the reason for the condition's last transition. |
+| **conditions.​status** (required) | string | Specifies the status of the condition. The value is either `True`, `False`, or `Unknown`. |
+| **conditions.​type** | string | Specifies the type of the Function's condition. |
+| **podSelector** | string | Specifies the Pod selector used to match Pods in the Function's Deployment. |
+| **reference** | string | Specifies either the branch name, tag, or commit revision from which the Function Controller automatically fetches the changes in the Function's code and dependencies. |
+| **replicas** | integer | Specifies the total number of non-terminated Pods targeted by this Function. |
+| **runtime** | string | Specifies the **Runtime** type of the Function. |
+| **runtimeImage** | string | Specifies the image version used to build and run the Function's Pods. |
+| **runtimeImageOverride** | string | Deprecated: Specifies the runtime image version which overrides the **RuntimeImage** status parameter. **RuntimeImageOverride** exists for historical compatibility and should be removed in the v1alpha3 version. |
+
+
+
+### Status Reasons
+
+Processing of a Function CR can succeed, continue, or fail for one of these reasons:
+
+| Reason | Type | Description |
+| -------------------------------- | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `ConfigMapCreated` | `ConfigurationReady` | A new ConfigMap was created based on the Function CR definition. |
+| `ConfigMapUpdated` | `ConfigurationReady` | The existing ConfigMap was updated after changes in the Function CR name, its source code, or dependencies. |
+| `SourceUpdated` | `ConfigurationReady` | The Function Controller managed to fetch changes in the Function's source code and configuration from the Git repository. |
+| `SourceUpdateFailed` | `ConfigurationReady` | The Function Controller failed to fetch changes in the Function's source code and configuration from the Git repository. |
+| `JobFailed` | `BuildReady` | The image with the Function's configuration could not be created due to an error. |
+| `JobCreated` | `BuildReady` | The Kubernetes Job resource that builds the Function image was created. |
+| `JobUpdated` | `BuildReady` | The existing Job was updated after changing the Function's metadata or spec fields that do not affect the way of building the Function image, such as labels. |
+| `JobRunning` | `BuildReady` | The Job is in progress. |
+| `JobsDeleted` | `BuildReady` | Previous Jobs responsible for building Function images were deleted. |
+| `JobFinished` | `BuildReady` | The Job finished, and the Function's image was uploaded to the Docker registry. |
+| `DeploymentCreated` | `Running` | A new Deployment referencing the Function's image was created. |
+| `DeploymentUpdated` | `Running` | The existing Deployment was updated after changing the Function's image, scaling parameters, variables, or labels. |
+| `DeploymentFailed` | `Running` | The Function's Pod crashed or could not start due to an error. |
+| `DeploymentWaiting` | `Running` | The Function was deployed and is waiting for the Deployment to be ready. |
+| `DeploymentReady` | `Running` | The Function was deployed and is ready. |
+| `ServiceCreated` | `Running` | A new Service referencing the Function's Deployment was created. |
+| `ServiceUpdated` | `Running` | The existing Service was updated after applying required changes. |
+| `ServiceFailed` | `Running` | The Function's Service could not be created or updated. |
+| `HorizontalPodAutoscalerCreated` | `Running` | A new HorizontalPodAutoscaler referencing the Function's Deployment was created. |
+| `HorizontalPodAutoscalerUpdated` | `Running` | The existing HorizontalPodAutoscaler was updated after applying required changes. |
+| `MinimumReplicasUnavailable` | `Running` | Insufficient number of available replicas. The Function is unhealthy. |
+
+## Related Resources and Components
+
+These are the resources related to this CR:
+
+| Custom resource | Description |
+| ----------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- |
+| [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) | Stores the Function's source code and dependencies. |
+| [Job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) | Builds an image with the Function's code in a runtime. |
| [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) | Serves the Function's image as a microservice. |
+| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | Exposes the Function's Deployment as a network service inside the Kubernetes cluster. |
+| [HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | Automatically scales the number of the Function's Pods. |
+
+These components use this CR:
+
+| Component | Description |
+| ------------------- | ------------------------------------------------------------------------------------------------------------ |
+| Function Controller | Uses the Function CR for the detailed Function definition, including the environment on which it should run. |
diff --git a/docs/user/resources/06-20-serverless-cr.md b/docs/user/resources/06-20-serverless-cr.md new file mode 100644 index 00000000..de0b562b --- /dev/null +++ b/docs/user/resources/06-20-serverless-cr.md @@ -0,0 +1,130 @@
+# Serverless
+
+The `serverlesses.operator.kyma-project.io` CustomResourceDefinition (CRD) is a detailed description of the Serverless configuration that you want to install on your cluster. To get the up-to-date CRD and show the output in the YAML format, run this command:
+
+ ```bash
+ kubectl get crd serverlesses.operator.kyma-project.io -o yaml
+ ```
+
+## Sample Custom Resource
+
+The following Serverless custom resource (CR) shows the configuration of Serverless with an external registry, custom Eventing and tracing endpoints, and additional custom configuration:
+
+ ```yaml
+ apiVersion: operator.kyma-project.io/v1alpha1
+ kind: Serverless
+ metadata:
+   finalizers:
+   - dockerregistry-operator.kyma-project.io/deletion-hook
+   name: default
+   namespace: kyma-system
+ spec:
+   dockerRegistry:
+     enableInternal: false
+     secretName: my-secret
+   eventing:
+     endpoint: http://eventing-publisher-proxy.kyma-system.svc.cluster.local/publish
+   tracing:
+     endpoint: http://telemetry-otlp-traces.kyma-system.svc.cluster.local:4318/v1/traces
+   targetCPUUtilizationPercentage: 50
+   functionRequeueDuration: 5m
+   functionBuildExecutorArgs: "--insecure,--skip-tls-verify,--skip-unused-stages,--log-format=text,--cache=true,--use-new-run,--compressed-caching=false"
+   functionBuildMaxSimultaneousJobs: 5
+   healthzLivenessTimeout: "10s"
+   functionRequestBodyLimitMb: 1
+   functionTimeoutSec: 180
+   defaultBuildJobPreset: "normal"
+   defaultRuntimePodPreset: "M"
+ status:
+   conditions:
+   - lastTransitionTime: "2023-04-28T10:09:37Z"
+     message: Configured with default Publisher Proxy URL and default Trace Collector
+       URL.
+     reason: Configured
+     status: "True"
+     type: Configured
+   - lastTransitionTime: "2023-04-28T10:15:15Z"
+     message: Serverless installed
+     reason: Installed
+     status: "True"
+     type: Installed
+   eventPublisherProxyURL: http://eventing-publisher-proxy.kyma-system.svc.cluster.local/publish
+   state: Ready
+   traceCollectorURL: http://telemetry-otlp-traces.kyma-system.svc.cluster.local:4318/v1/traces
+ ```
+
+## Custom Resource Parameters
+
+For details, see the [Serverless specification file](https://github.com/kyma-project/serverless-manager/blob/main/components/operator/api/v1alpha1/serverless_types.go).
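+
+To check the configuration that is actually applied on your cluster, you can read the CR back with kubectl. This is a minimal sketch; it assumes the CR keeps the `default` name in the `kyma-system` namespace, as in the sample above.
+
+```bash
+# Print the applied Serverless configuration; the "default" name and
+# "kyma-system" namespace follow the sample CR above.
+kubectl get serverlesses.operator.kyma-project.io default -n kyma-system -o yaml
+
+# Print only the overall state reported in the status.
+kubectl get serverlesses.operator.kyma-project.io default -n kyma-system -o jsonpath='{.status.state}'
+```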
### Serverless.operator.kyma-project.io/v1alpha1

**Spec:**

| Parameter | Type | Description |
|-------------------------------------------|---------|-------------|
| **dockerRegistry** | object | |
| **dockerRegistry.​enableInternal** | boolean | When set to `true`, the internal Docker registry is enabled. |
| **dockerRegistry.​secretName** | string | The Secret used for the configuration of the Docker registry. |
| **eventing** | object | |
| **eventing.​endpoint** (required) | string | The Eventing endpoint to be used. |
| **tracing** | object | |
| **tracing.​endpoint** (required) | string | The tracing endpoint to be used. |
| **targetCPUUtilizationPercentage** | string | Sets a custom CPU utilization threshold for scaling Function Pods. |
| **functionRequeueDuration** | string | Sets the requeue duration for the Function. By default, the Function associated with the default configuration is requeued every 5 minutes. |
| **functionBuildExecutorArgs** | string | Specifies the arguments passed to the Function build executor. |
| **functionBuildMaxSimultaneousJobs** | string | The maximum number of build Jobs that can run simultaneously. The default value is `5`. |
| **healthzLivenessTimeout** | string | Sets the timeout for the Function health check. The default value is `10` seconds. |
| **functionRequestBodyLimitMb** | string | Configures the maximum size limit for the request body of a Function. The default value is `1` megabyte. |
| **functionTimeoutSec** | string | Sets the maximum execution time limit for a Function. By default, the value is `180` seconds. |
| **defaultBuildJobPreset** | string | Configures the default build Job preset to be used. |
| **defaultRuntimePodPreset** | string | Configures the default runtime Pod preset to be used. |

**Status:**

| Parameter | Type | Description |
|------------------------------------------------------|------------|-------------|
| **conditions** | \[\]object | Conditions associated with CustomStatus. |
| **conditions.​lastTransitionTime** (required) | string | Specifies the last time the condition transitioned from one status to another. This should be when the underlying condition changes. If that is not known, then using the time when the API field changed is acceptable. |
| **conditions.​message** (required) | string | Provides a human-readable message indicating details about the transition. This may be an empty string. |
| **conditions.​observedGeneration** | integer | Represents the **.metadata.generation** that the condition was set based upon. For instance, if **.metadata.generation** is currently `12`, but the **.status.conditions[x].observedGeneration** is `9`, the condition is out of date with respect to the current state of the instance. |
| **conditions.​reason** (required) | string | Contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field and whether the values are considered a guaranteed API. The value should be a camelCase string. This field may not be empty. |
| **conditions.​status** (required) | string | Specifies the status of the condition. The value is either `True`, `False`, or `Unknown`. |
| **conditions.​type** (required) | string | Specifies the condition type in camelCase or in `foo.example.com/CamelCase`. Many **.conditions.type** values are consistent across resources like `Available`, but because arbitrary conditions can be useful (see **.node.status.conditions**), the ability to deconflict is important. The regex it matches is `(dns1123SubdomainFmt/)?(qualifiedNameFmt)`. |
+| **dockerRegistry** | string | The registry configuration in use. Contains the registry URL or `internal`. |
+| **eventingEndpoint** | string | The Eventing endpoint in use. |
+| **served** (required) | string | Signifies that the current Serverless is managed. The value is either `True` or `False`. |
+| **state** | string | Signifies the current state of Serverless. The value is one of `Ready`, `Processing`, `Error`, or `Deleting`. |
+| **tracingEndpoint** | string | The tracing endpoint in use. |
+| **targetCPUUtilizationPercentage** | string | The target CPU utilization percentage in use. |
+| **functionRequeueDuration** | string | The Function requeue duration in use. |
+| **functionBuildExecutorArgs** | string | The Function build executor arguments in use. |
+| **functionBuildMaxSimultaneousJobs** | string | The maximum number of simultaneous build Jobs in use. |
+| **healthzLivenessTimeout** | string | The healthz liveness timeout in use. |
+| **functionRequestBodyLimitMb** | string | The Function request body limit in use. |
+| **functionTimeoutSec** | string | The Function timeout in use. |
+| **defaultBuildJobPreset** | string | The default build Job preset in use. |
+| **defaultRuntimePodPreset** | string | The default runtime Pod preset in use. |
+
+
+
+## Serverless CR Conditions
+
+This section describes the possible states of the Serverless CR. Three condition types, `Installed`, `Configured`, and `Deleted`, are used.
+
+| No | CR State | Condition type | Condition status | Condition reason | Remark |
+|----|------------|----------------|------------------|-----------------------|-----------------------------------------------|
+| 1 | Processing | Configured | true | Configured | Serverless configuration verified |
+| 2 | Processing | Configured | unknown | ConfigurationCheck | Serverless configuration verification ongoing |
+| 3 | Error | Configured | false | ConfigurationCheckErr | Serverless configuration verification error |
+| 4 | Error | Configured | false | ServerlessDuplicated | Only one Serverless CR is allowed |
+| 5 | Ready | Installed | true | Installed | Serverless workloads deployed |
+| 6 | Processing | Installed | unknown | Installation | Deploying Serverless workloads |
+| 7 | Error | Installed | false | InstallationErr | Deployment error |
+| 8 | Deleting | Deleted | unknown | Deletion | Deletion in progress |
+| 9 | Deleting | Deleted | true | Deleted | Serverless module deleted |
+| 10 | Error | Deleted | false | DeletionErr | Deletion failed |
diff --git a/docs/user/resources/README.md b/docs/user/resources/README.md new file mode 100644 index 00000000..847fa413 --- /dev/null +++ b/docs/user/resources/README.md @@ -0,0 +1,3 @@
+# Resources
+
+In this section, you can find the custom resources (CRs) used in the Serverless module.
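+
+A quick way to verify these resources on a live cluster is to list the Serverless-related CustomResourceDefinitions and their instances with kubectl. This is a minimal sketch; it assumes kubectl access to a cluster with the Serverless module enabled.
+
+```bash
+# List the CustomResourceDefinitions that the Serverless module introduces.
+kubectl get crds | grep -E 'functions.serverless|serverlesses.operator'
+
+# List the instances of both custom resources across all namespaces.
+kubectl get functions.serverless.kyma-project.io -A
+kubectl get serverlesses.operator.kyma-project.io -A
+```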
diff --git a/docs/user/technical-reference/04-10-architecture.md b/docs/user/technical-reference/04-10-architecture.md new file mode 100644 index 00000000..d13ae366 --- /dev/null +++ b/docs/user/technical-reference/04-10-architecture.md @@ -0,0 +1,28 @@
+# Serverless Architecture
+
+Serverless relies heavily on Kubernetes resources. It uses [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [Services](https://kubernetes.io/docs/concepts/services-networking/service/), and [HorizontalPodAutoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to deploy and manage Functions, and [Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) to create Docker images. See how these and other resources process a Function within a Kyma cluster:
+
+![Serverless architecture](../../assets/svls-architecture.svg)
+
+> [!WARNING]
+> Serverless imposes some requirements on the setup of namespaces. For example, if you apply custom [LimitRanges](https://kubernetes.io/docs/concepts/policy/limit-range/) for a new namespace, they must be higher than or equal to the limits for the build Jobs' resources.
+
+1. Create a Function either through the UI or by applying a Function custom resource (CR). This CR contains the Function definition (business logic that you want to execute) and information on the environment on which it should run.
+
+2. Before the Function can be saved or modified, it is first updated and then verified by the defaulting and validation webhooks, respectively.
+
+3. Function Controller (FC) detects the new, validated Function CR.
+
+4. FC creates a ConfigMap with the Function definition.
+
+5. Based on the ConfigMap, FC creates a Kubernetes Job that triggers the creation of a Function image.
+
+6. The Job creates a Pod which builds the production Docker image based on the Function's definition. The Job then pushes this image to a Docker registry.
+
+7. FC monitors the Job status. When the image creation finishes successfully, FC creates a Deployment that uses the newly built image.
+
+8. FC creates a Service that points to the Deployment.
+
+9. FC creates a HorizontalPodAutoscaler that automatically scales the number of Pods in the Deployment based on the observed CPU utilization.
+
+10. FC waits for the Deployment to become ready.
diff --git a/docs/user/technical-reference/04-20-internal-registry.md b/docs/user/technical-reference/04-20-internal-registry.md new file mode 100644 index 00000000..3257f2aa --- /dev/null +++ b/docs/user/technical-reference/04-20-internal-registry.md @@ -0,0 +1,22 @@
+# Internal Docker Registry
+
+By default, the Kyma Serverless module comes with the internal Docker registry, which stores the Function container images without using a third-party registry.
+
+The internal Docker registry is not recommended for production, as it's not deployed in a High Availability (HA) setup, has limited storage space, and offers no garbage collection of orphaned images.
+
+Still, it is very convenient for development and for gaining first-time experience with Kyma Serverless.
+
+See the following diagram to learn how it works:
+
+![Internal Docker registry](../../assets/svls-internal-registry.svg)
+
+1. The build Job pushes the Function image to the Docker registry using the in-cluster URL.
+2. The Kubernetes DNS resolves the internal Docker registry URL to the actual IP address.
+3. [kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) fetches the image using the URL: `localhost:{node_port}/{image}`.
+4. The NodePort service allows kubelet to get into the cluster network, translate `localhost` to `internal-registry.kyma-system.svc.cluster.local`, and ask the Kubernetes DNS to resolve the name.
+5. The Kubernetes DNS service resolves the name and provides the IP of the internal Docker registry.
+
+> [!NOTE]
+> kubelet cannot resolve the in-cluster URL. That's why Serverless uses the NodePort service.
+
+The NodePort service routing ensures that the pull request reaches the internal Docker registry even when it comes from a different node.
diff --git a/docs/user/technical-reference/05-20-env-variables.md b/docs/user/technical-reference/05-20-env-variables.md new file mode 100644 index 00000000..d5574c22 --- /dev/null +++ b/docs/user/technical-reference/05-20-env-variables.md @@ -0,0 +1,128 @@
+# Environment Variables
+
+You can use environment variables to configure an existing runtime, to read the existing configuration, or to build your own runtime based on them.
+
+## Environments Passed to Runtimes
+
+Every runtime provides its own unique environment configuration, which can be read by a server and the `handler.js` file during the container run:
+
+### Common Environments
+
+| Environment | Default | Description |
+|---------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **FUNC_HANDLER** | `main` | The name of the exported Function inside the `MOD_NAME` file. |
+| **MOD_NAME** | `handler` | The name of the main exported file. It must have the `.py` extension for the Python runtimes and `.js` for the Node.js ones. The extension must be added on the server side. |
+| **FUNC_PORT** | `8080` | The port the server listens on. |
+| **SERVICE_NAMESPACE** | None | The namespace where the Function exists in the cluster. |
+| **KUBELESS_INSTALL_VOLUME** | `/kubeless` | The full path to the volume mount with the user's source code. |
+| **FUNC_RUNTIME** | None | The name of the actual runtime. Possible values: `nodejs18` - deprecated, `nodejs20`, `python39` - deprecated, and `python312`. |
+| **TRACE_COLLECTOR_ENDPOINT** | None | The full address of the OpenTelemetry Trace Collector. Exported only if the trace collector's endpoint is present. |
+| **PUBLISHER_PROXY_ADDRESS** | `http://eventing-publisher-proxy.kyma-system.svc.cluster.local/publish` | The full address of the Publisher Proxy service. |
+
+### Specific Environments
+
+A few environment variables occur only for specific runtimes. The following list includes all of them:
+
+#### Python Runtime-Specific Environment Variables
+
+| Environment | Default | Description |
+|---------------|-----------|-------------|
+| **PYTHONPATH** | `$(KUBELESS_INSTALL_VOLUME)/lib.python3.9/site-packages:$(KUBELESS_INSTALL_VOLUME)` | The list of directories that Python must add to the `sys.path` directory list. |
+| **PYTHONUNBUFFERED** | `TRUE` | Defines whether Python's logs are buffered before being printed out. |
+
+## Configure Runtime
+
+You can configure environment variables either separately for a given runtime or make them runtime-agnostic using a ConfigMap.
+
+### Define Environment Variables in a ConfigMap
+
+ConfigMaps allow you to define the Function's environment variables for any runtime through key-value pairs.
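+
+For example, you can create such a ConfigMap directly with kubectl before deploying the Function. This is a minimal sketch; the `my-vars-cm` name and `my-var` key match the sample Function CR that follows.
+
+```bash
+# Create the ConfigMap that the sample Function below references;
+# the "my-vars-cm" name and "my-var" key match the Function CR.
+kubectl create configmap my-vars-cm \
+  --namespace default \
+  --from-literal=my-var="Hello from a ConfigMap"
+```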
After you define the values in a ConfigMap, simply reference it in the Function custom resource (CR) through the **valueFrom** parameter. See an example of such a Function CR that specifies the `my-var` value as a reference to the key stored in the `my-vars-cm` ConfigMap as the `MY_VAR` environment variable.
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: sample-cm-env-values
+  namespace: default
+spec:
+  env:
+    - name: MY_VAR
+      valueFrom:
+        configMapKeyRef:
+          key: my-var
+          name: my-vars-cm
+  runtime: nodejs20
+  source:
+    inline:
+      source: |
+        module.exports = {
+          main: function (event, context) {
+            return process.env["MY_VAR"];
+          }
+        }
+```
+
+### Node.js Runtime-Specific Environment Variables
+
+To configure the Function with the Node.js runtime, override the default values of these environment variables:
+
+| Environment variable | Description | Type | Default value |
+| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ------- | ------------- |
+| **FUNC_TIMEOUT** | Specifies the number of seconds in which a runtime must execute the code. | Number | `180` |
+| **REQ_MB_LIMIT** | Specifies the payload body size limit in megabytes. | Number | `1` |
+| **KYMA_INTERNAL_LOGGER_ENABLED** | Enables the default HTTP request logger, which uses the standard Apache combined log output. To enable it, set its value to `true`. | Boolean | `false` |
+
+See the example of a Function with these environment variables set:
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: sample-fn-with-envs
+  namespace: default
+spec:
+  env:
+    - name: FUNC_TIMEOUT
+      value: '2'
+    - name: REQ_MB_LIMIT
+      value: '10'
+  runtime: nodejs20
+  source:
+    inline:
+      source: |
+        module.exports = {
+          main: function (event, context) {
+            return "Hello World!";
+          }
+        }
+```
+
+### Python Runtime-Specific Environment Variables
+
+To configure a Function with the Python runtime, override the default values of these environment variables:
+
+| Environment variable | Description | Type | Default value |
+| -------------------------------- |---------------------------------------------------------------------------------------------------------------------------- | ------- | --------------- |
+| **FUNC_MEMFILE_MAX** | Specifies the maximum size limit for the HTTP request body in bytes. | Number | `100*1024*1024` |
+| **CHERRYPY_NUMTHREADS** | Specifies the number of requests that can be handled in parallel. | Number | `50` |
+| **KYMA_INTERNAL_LOGGER_ENABLED** | Enables the default HTTP request logger, which uses the standard Apache combined log output. To enable it, set its value to `true`. | Boolean | `false` |
+
+See [`kubeless.py`](https://github.com/kubeless/runtimes/blob/master/stable/python/_kubeless.py) to get a deeper understanding of how the Bottle server, which acts as a runtime, uses these values internally to run Python Functions. See the following example:
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: sample-fn-with-envs
+  namespace: default
+spec:
+  env:
+    - name: FUNC_MEMFILE_MAX
+      value: '1048576' #1MiB
+  runtime: python312
+  source:
+    inline:
+      source: |
+        def main(event, context):
+          return "Hello World!"
+```
diff --git a/docs/user/technical-reference/07-10-sample-functions.md b/docs/user/technical-reference/07-10-sample-functions.md new file mode 100644 index 00000000..1869e92c --- /dev/null +++ b/docs/user/technical-reference/07-10-sample-functions.md @@ -0,0 +1,64 @@
+# Sample Functions
+
+Functions support multiple languages through the use of runtimes. To use a chosen runtime, add its name and version as a value in the **spec.runtime** field of the [Function custom resource (CR)](../resources/06-10-function-cr.md). If this value is not specified, it defaults to `nodejs18`. Dependencies for a Node.js Function must be specified using the [`package.json`](https://docs.npmjs.com/creating-a-package-json-file) file format. Dependencies for a Python Function must follow the format used by [pip](https://packaging.python.org/key_projects/#pip).
+
+> [!TIP]
+> If you are interested in the Function's signature, `event` and `context` objects, and custom HTTP responses the Function returns, read about the [Function’s specification](07-70-function-specification.md).
+
+See sample Functions for all available runtimes:
+
+
+
+#### **Node.js**
+
+```bash
+cat <
diff --git a/docs/user/technical-reference/07-20-function-processing-stages.md b/docs/user/technical-reference/07-20-function-processing-stages.md new file mode 100644 index 00000000..8d2f5b39 --- /dev/null +++ b/docs/user/technical-reference/07-20-function-processing-stages.md @@ -0,0 +1,50 @@
+# Function Processing Stages
+
+From the moment you [create a Function](../tutorials/01-10-create-inline-function.md) (Function CR) until the time it is ready, it goes through three processing stages that are defined as these condition types:
+
+1. `ConfigurationReady` (PrinterColumn `CONFIGURED`)
+2. `BuildReady` (PrinterColumn `BUILT`)
+3. `Running` (PrinterColumn `RUNNING`)
+
+For a Function to be considered ready, the status of all three conditions must be `True`:
+
+```bash
+NAME CONFIGURED BUILT RUNNING RUNTIME VERSION AGE
+test-function True True True nodejs20 1 96s
+```
+
+When you update an existing Function, conditions change asynchronously depending on the change type.
+
+The diagrams illustrate all three core status changes in the Function processing cycle that the Function Controller handles. They also list all custom resources involved in this process and specify in which cases their update is required.
+
+> [!NOTE]
+> Before you start reading, see the [Function CR](../resources/06-10-function-cr.md) document for the detailed custom resource definition, the list of all Function's condition types, and the reasons for their success or failure.
+
+## Configured
+
+This initial phase starts when you create a Function CR with the configuration specifying the Function's setup. It ends with creating a ConfigMap that is used as a building block for the Function image.
+
+![Function configured](../../assets/svls-configured.svg)
+
+## Built
+
+This phase involves creating and processing the Job CR. It ends successfully when the Function image is built and sent to the Docker registry. If the image already exists and only an update is required, the Docker image receives a new tag.
+
+Updating an existing Function requires an image rebuild only if you change the Function's body (**source**) or dependencies (**deps**). An update of the Function's other configuration details, such as environment variables, replicas, resources, or labels, does not require an image rebuild because it only affects the Deployment.
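+
+To observe this behavior on a live cluster, you can watch the build Jobs while you update a Function. This is a rough sketch; the label selector is an assumption for illustration, because the exact labels that the Function Controller sets on build Jobs may differ between versions.
+
+```bash
+# Watch the build Jobs for a Function named "test-function"; a change to its
+# source or dependencies should spawn a new Job, while a label-only change
+# should not. NOTE: the label key below is assumed for illustration.
+kubectl get jobs -n default \
+  -l serverless.kyma-project.io/function-name=test-function -w
+```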
+
+> [!NOTE]
+> Each time you update a Function's configuration, the Function Controller deletes all previous Job CRs for the given Function's **UID**.
+
+![Function built](../../assets/svls-built.svg)
+
+## Running
+
+This stage revolves around creating a Deployment, Service, and HorizontalPodAutoscaler, or updating them when configuration changes were made in the Function CR or the Function image was rebuilt.
+
+In general, the Deployment is considered updated when both the configuration and the image tag in the Deployment are up to date. The Service and HorizontalPodAutoscaler are considered updated when the proper labels are set and the configuration is up to date.
+
+Thanks to the implemented reconciliation loop, the Function Controller constantly observes all newly created or updated resources. If it detects changes, it fetches the appropriate resource's status and only then updates the Function's status.
+
+The Function Controller observes the status of the underlying Deployment. If the minimum availability condition for the replicas is not satisfied, the Function Controller sets the **Running** status to `Unknown` with the reason `MinimumReplicasUnavailable`. Such a Function is considered unhealthy, and the runtime profile or the number of replicas must be adjusted.
+
+![Function running](../../assets/svls-running.svg)
diff --git a/docs/user/technical-reference/07-40-git-source-type.md b/docs/user/technical-reference/07-40-git-source-type.md new file mode 100644 index 00000000..fe63ee7d --- /dev/null +++ b/docs/user/technical-reference/07-40-git-source-type.md @@ -0,0 +1,31 @@
+# Git Source Type
+
+Depending on the runtime you use to build your Function (Node.js or Python), your Git repository must contain at least a directory with these files:
+
+- `handler.js` or `handler.py` with the Function's code
+- `package.json` or `requirements.txt` with the Function's dependencies
+
+The Function CR must contain **spec.source.gitRepository** to specify that you use a Git repository for the Function's sources.
+
+To create a Function with the Git source, you must:
+
+1. Create a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) (optional, only if you must authenticate to the repository).
+2. Create a [Function CR](../resources/06-10-function-cr.md) with your Function definition and references to the Git repository.
+
+> [!NOTE]
+> For detailed steps, see the tutorial on [creating a Function from Git repository sources](../tutorials/01-11-create-git-function.md).
+
+You can have various setups for your Function's Git source with different:
+
+- Directory structures
+
+  To specify the location of your code dependencies, use the **baseDir** parameter in the Function CR. For example, use `"/"` if you keep the source files at the root of your repository.
+
+- Authentication methods
+
+  To define that you must authenticate to the repository with a password or token (`basic`), or an SSH key (`key`), use the **spec.source.gitRepository.auth** parameter in the Function CR.
+
+- Function's rebuild triggers
+
+  To define whether the Function Controller must monitor a given branch or commit in the Git repository to rebuild the Function upon changes, use the **spec.source.gitRepository.reference** parameter in the Function CR. All three settings are combined in the sketch after this list.
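+
+The following sketch shows a Git Function CR that puts the directory structure, authentication method, and rebuild trigger together. The repository URL, branch, and Secret name are placeholders, and the `kubectl apply` heredoc pattern mirrors the one used in the sample Functions.
+
+```bash
+# A minimal Git Function sketch; the URL, branch, and Secret name are placeholders.
+cat <<EOF | kubectl apply -f -
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: my-git-function
+spec:
+  runtime: nodejs20
+  source:
+    gitRepository:
+      url: https://github.com/username/repo.git
+      baseDir: "/"          # directory structure: sources at the repository root
+      reference: "main"     # rebuild trigger: the monitored branch
+      auth:                 # authentication: password or token stored in a Secret
+        type: basic
+        secretName: git-creds
+EOF
+```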
+ \ No newline at end of file
diff --git a/docs/user/technical-reference/07-60-function-configuration-file.md b/docs/user/technical-reference/07-60-function-configuration-file.md new file mode 100644 index 00000000..3a99aca2 --- /dev/null +++ b/docs/user/technical-reference/07-60-function-configuration-file.md @@ -0,0 +1,178 @@
+# Function Configuration File
+
+When you initialize a Function (with the `init` command), Kyma CLI creates the `config.yaml` file in your workspace folder. This file contains the Function's whole configuration and specification, not only for the Function custom resource (CR) but also for any other related resources you create for it, such as Subscriptions and APIRules.
+
+## Specification for an Inline Function
+
+See the sample `config.yaml` for an inline Function, for which the code and dependencies are stored in the Function CR under the **spec.source** and **spec.deps** fields. This specification also contains the definition of a sample Subscription and APIRules for the Function:
+
+```yaml
+name: function-practical-filip5
+namespace: testme
+runtime: nodejs20
+runtimeImageOverride: europe-docker.pkg.dev/kyma-project/prod/function-runtime-nodejs20:v20240320-dacf4702
+labels:
+  app: serverless-test
+source:
+  sourceType: inline
+  sourcePath: /tmp/cli
+  sourceHandlerName: /code/handler.js
+  depsHandlerName: /dependencies/package.json
+resources:
+  limits:
+    cpu: 1
+    memory: 1Gi
+  requests:
+    cpu: 500m
+    memory: 500Mi
+subscriptions:
+  - name: function-practical-filip5
+    typeMatching: exact
+    source: ""
+    types:
+      - sap.kyma.custom.demo-app.order.created.v1
+apiRules:
+  - name: function-practical-filip5
+    gateway: kyma-system/kyma-gateway
+    service:
+      host: path.kyma.example.com
+      port: 80
+    rules:
+      - methods:
+          - GET
+          - POST
+          - PUT
+          - PATCH
+          - DELETE
+          - HEAD
+        accessStrategies: []
+      - path: /path1/something1
+        methods:
+          - PUT
+          - PATCH
+          - DELETE
+        accessStrategies:
+          - handler: noop
+      - path: /path1/something2
+        methods:
+          - GET
+        accessStrategies:
+          - config:
+              required_scope: ["read"]
+            handler: oauth2_introspection
+      - path: /path2
+        methods:
+          - DELETE
+        accessStrategies:
+          - handler: jwt
+            config:
+              jwksUrls:
+                - {jwks_uri of your OpenID Connect-compliant identity provider}
+              trustedIssuers:
+                - {issuer URL of your OpenID Connect-compliant Identity provider}
+env:
+  - name: REDIS_PASS
+    value: YgJUg8z6eA
+  - name: REDIS_PORT
+    value: "6379"
+  - name: REDIS_HOST
+    value: hb-redis-enterp-6541066a-edbc-422f-8bef-fafca0befea8-redis.testme.svc.cluster.local
+  - valueFrom:
+      configMapKeyRef:
+        Name: configmap1
+        Key: token-field
+  - valueFrom:
+      secretKeyRef:
+        Name: secret1
+        Key: token-field
+schemaVersion: v1
+```
+
+## Specification for a Git Function
+
+See the sample `config.yaml` for a [Git Function](07-40-git-source-type.md), for which the code and dependencies are stored in a selected Git repository:
+
+```yaml
+name: function-practical-marcin
+namespace: iteration-review
+runtime: nodejs20
+source:
+  sourceType: git
+  url: https://github.com/username/public-gitops.git
+  repository: my-repo
+  reference: main
+  baseDir: /
+  credentialsType: basic
+  credentialsSecretName: secret2
+```
+
+## Parameters
+
+See the following parameter descriptions.
+
+> [!NOTE]
+> The **Default value** column specifies the values that Kyma CLI sets when applying resources in a cluster, if no other values are provided.
+
+| Parameter | Required | Related custom resource | Default value | Description |
+|----------------------------------------------------------------|:--------:| ---------| ---------|-------------|
+| **name** | Yes | Function | | Specifies the name of your Function. |
+| **namespace** | No | Function | `default` | Defines the namespace in which the Function is created. |
+| **runtime** | Yes | Function | | Specifies the execution environment for your Function. The available values are `nodejs18` - deprecated, `nodejs20`, `python39` - deprecated, and `python312`. |
+| **runtimeImageOverride** | No | Function | | Specifies the runtime image to be used instead of the default one. |
+| **labels** | No | Function | | Specifies the Function's Pod labels. |
+| **source** | Yes | Function | | Provides details on the type and location of your Function's source code and dependencies. |
+| **source.sourceType** | Yes | Function | | Defines whether you use inline code or a Git repository as the source of the Function's code and dependencies. It must be set to either `inline` or `git`. |
+| **source.sourcePath** | No | Function | Location of the `config.yaml` file | Specifies the absolute path to the directory with the Function's source code. |
+| **source.sourceHandlerName** | No | Function | `handler.js` (Node.js) or `handler.py` (Python) | Defines the path to the file with your Function's code. Specify it if you want to store the source code separately from the `config.yaml`. This path is relative to the one provided in **source.sourcePath**. |
+| **source.depsHandlerName** | No | Function | `package.json` (Node.js) or `requirements.txt` (Python) | Defines the path to the file with your Function's dependencies. Specify it if you want to store the dependencies separately from the `config.yaml`. This path is relative to the one provided in **source.sourcePath**. |
+| **source.url** | No | Function | | Provides the address of the Git repository with the Function's code and dependencies. Depending on whether the repository is public or private and what authentication method is used to access it, the URL must start with the `http(s)`, `git`, or `ssh` prefix, and end with the `.git` suffix. |
+| **source.repository** | No | Function | Function name | Specifies the name of the Git repository. |
+| **source.reference** | No | Function | | Specifies either the branch name or the commit revision from which the Function Controller automatically fetches the changes in the Function's code and dependencies. |
+| **source.baseDir** | No | Function | | Specifies the location of your code dependencies in the repository. It is recommended to keep the source files at the root of your repository (`/`). |
+| **source.credentialsType** | No | Function | `basic` | Specifies the content type of the Secret with credentials to the Git repository. Defines whether you must authenticate to the repository with a password or token (`basic`), or an SSH key (`key`). |
+| **source.credentialsSecretName** | No | Function | | Specifies the name of the Secret with credentials to the Git repository. It is used by the Function Controller to authenticate to the Git repository to fetch the Function's source code and dependencies. This Secret must be stored in the same namespace as the [Function CR](../resources/06-10-function-cr.md). |
+| **resources** | No | Function | | Defines CPU and memory available for the Function's Pod to use. |
+| **resources.limits** | No | Function | | Defines the maximum available CPU and memory values for the Function. |
+| **resources.limits.cpu** | No | Function | `100m` | Defines the maximum available CPU value for the Function. |
+| **resources.limits.memory** | No | Function | `128Mi` | Defines the maximum available memory value for the Function. |
+| **resources.requests** | No | Function | | Defines the minimum requested CPU and memory values for the Function. |
+| **resources.requests.cpu** | No | Function | `50m` | Defines the minimum requested CPU value for the Function. |
+| **resources.requests.memory** | No | Function | `64Mi` | Defines the minimum requested memory value for the Function. |
+| **subscriptions** | No | Subscription | | Defines a Subscription by which the Function gets triggered to perform the business logic defined in the Function's source code. |
+| **subscriptions.name** | Yes | Subscription | Function name | Specifies the name of the Subscription custom resource. It takes the name from the Function unless you specify otherwise. |
+| **subscriptions.typeMatching** | No | Subscription | | Defines the matching type (`standard` or `exact`) for event types. When it is set to `exact`, Eventing does not modify the provided `spec.types` internally. In the case of `standard`, Eventing modifies the types internally to fulfill the backend requirements. It is set to `standard` unless you specify otherwise. |
+| **subscriptions.source** | Yes | Subscription | | Defines the source from which the event originated. |
+| **subscriptions.types** | Yes | Subscription | | Defines the list of event types used to trigger workloads. |
+| **apiRules** | No | APIRule | | Provides the rules defining how your Function's Service API can be accessed. |
+| **apiRules.name** | Yes | APIRule | Function name | Specifies the name of the exposed Service. It takes the name from the Function unless you specify otherwise. |
+| **apiRules.gateway** | No | APIRule | `kyma-system/kyma-gateway` | Specifies the [Istio Gateway](https://istio.io/latest/docs/reference/config/networking/gateway/). |
+| **apiRules.service** | No | APIRule | | Specifies the name of the exposed Service. |
+| **apiRules.service.host** | No | APIRule | | Specifies the Service's communication address for inbound external traffic. |
+| **apiRules.service.port** | No | APIRule | `80` | Defines the port on which the Function's Service is exposed. This value cannot be modified. |
+| **apiRules.rules** | Yes | APIRule | | Specifies the array of [Oathkeeper](https://www.ory.sh/oathkeeper/) access rules. |
+| **apiRules.rules.methods** | No | APIRule | | Specifies the list of HTTP request methods available for **apiRules.rules.path**. |
+| **apiRules.rules.accessStrategies** | Yes | APIRule | | Specifies the array of [Oathkeeper authenticators](https://www.ory.sh/oathkeeper/docs/pipeline/authn/). The supported authenticators are `oauth2_introspection`, `jwt`, `noop`, and `allow`. |
+| **apiRules.rules.path** | No | APIRule | `/.*` | Specifies the path to the exposed Service. |
+| **apiRules.rules.path.accessStrategies.handler** | Yes | APIRule | `allow` | Specifies one of the authenticators used: `oauth2_introspection`, `jwt`, `noop`, or `allow`. |
+| **apiRules.rules.path.accessStrategies.config** | No | APIRule | | Defines the configuration of the used handler. It can be specified globally or per access rule. |
+| **apiRules.rules.path.accessStrategies.config.required_scope** | No | APIRule | | Defines the [limits](https://oauth.net/2/scope/) that the client specifies for an access request. In turn, the authorization server issues the access token in the defined scope. |
+| **apiRules.rules.path.accessStrategies.config.jwks_urls** | No | APIRule | | The URLs from which ORY Oathkeeper can retrieve [JSON Web Keys](https://www.ory.sh/oathkeeper/docs/pipeline/authn/#jwt) to validate the JSON Web Token. |
+| **apiRules.rules.path.accessStrategies.config.trustedIssuers** | No | APIRule | | Sets a list of trusted token issuers. |
+| **env.name** | No | Function | | Specifies the name of the environment variable to export for the Function. |
+| **env.value** | No | Function | | Specifies the value of the environment variable to export for the Function. |
+| **env.valueFrom** | No | Function | | Specifies that you want the Function to use values either from a Secret or a ConfigMap. These objects must be stored in the same namespace as the Function. |
+| **env.valueFrom.configMapKeyRef** | No | Function | | Refers to the values from a ConfigMap that you want to use in the Function. |
+| **env.valueFrom.configMapKeyRef.Name** | No | Function | | Specifies the name of the referred ConfigMap. |
+| **env.valueFrom.configMapKeyRef.Key** | No | Function | | Specifies the key containing the referred value from the ConfigMap. |
+| **env.valueFrom.secretKeyRef** | No | Function | | Refers to the values from a Secret that you want to use in the Function. |
+| **env.valueFrom.secretKeyRef.Name** | No | Function | | Specifies the name of the referred Secret. |
+| **env.valueFrom.secretKeyRef.Key** | No | Function | | Specifies the key containing the referred value from the Secret. |
+| **schemaVersion** | Yes | Function | | Specifies the Subscription API version. |
+
+## Related Resources
+
+See the detailed descriptions of all related custom resources referred to in the `config.yaml`:
+
+- [Function](../resources/06-10-function-cr.md)
+- [Subscription](https://kyma-project.io/docs/kyma/latest/05-technical-reference/00-custom-resources/evnt-01-subscription/)
+- [API Rule](https://kyma-project.io/docs/kyma/latest/05-technical-reference/00-custom-resources/apix-01-apirule/)
diff --git a/docs/user/technical-reference/07-70-function-specification.md b/docs/user/technical-reference/07-70-function-specification.md
new file mode 100644
index 00000000..442a9d50
--- /dev/null
+++ b/docs/user/technical-reference/07-70-function-specification.md
@@ -0,0 +1,234 @@
+# Function's Specification
+
+With the Serverless module, you can create Functions in both Node.js and Python. Although the Function's interface is unified, its specification differs depending on the runtime used to run the Function.
+
+## Signature
+
+The Function's code is represented by a handler, that is, a method that processes events. When your Function is invoked, it runs this handler method to process a given request and return a response.
+
+All Functions have a predefined signature with elements common for all available runtimes:
+
+- Functions' code must be introduced by the `main` handler name.
+- Functions must accept two arguments that are passed to the Function handler: + - `event` + - `context` + +See these signatures for each runtime: + + + +#### **Node.js** + +```js +module.exports = { + main: function (event, context) { + return + } +} +``` + +#### **Python** + +```python +def main(event, context): + return +``` + + + +### Event Object + +The `event` object contains information about the event the Function refers to. For example, an API request event holds the HTTP request object. + +Functions in Kyma accept [CloudEvents](https://cloudevents.io/) (**ce**) with the following specification: + + + +#### **Node.js** + +```json +... +{ + "ce-type": "com.github.pull_request.opened", + "ce-source": "/cloudevents/spec/pull/123", + "ce-eventtypeversion": "v1", + "ce-specversion": "1.0", + "ce-id": "abc123", + "ce-time": "2020-12-20T13:37:33.647Z", + "data": {BUFFER}, + "tracer": {OPENTELEMETRY_TRACER}, + "extensions": { + "request": {INCOMING_MESSAGE}, + "response": {SERVER_RESPONSE}, + } +} +``` + +#### **Python** + +```json +{ + "ce-type": "com.github.pull_request.opened", + "ce-source": "/cloudevents/spec/pull/123", + "ce-eventtypeversion": "v1", + "ce-specversion": "1.0", + "ce-id": "abc123", + "ce-time": "2020-12-20T13:37:33.647Z", + "data": "", + "tracer": {OPENTELEMETRY_TRACER}, + "extensions": { + "request": {PICKLABLE_BOTTLE_REQUEST}, + } +} +``` + + + +See the detailed descriptions of these fields: + +| Field | Description | +|-------|-------------| +| **ce-type** | Type of the CloudEvent data related to the originating occurrence | +| **ce-source** | Unique context in which an event happened and can relate to an organization or a process | +| **ce-eventtypeversion** | Version of the CloudEvent type | +| **ce-specversion** | Version of the CloudEvent specification used for this event | +| **ce-id** | Unique identifier of the event | +| **ce-time** | Time at which the event was sent | +| **data** | Either JSON or a string, depending on the request type. Read more about [Buffer](https://nodejs.org/api/buffer.html) in Node.js and [bytes literals](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals) in Python. | +| **tracer** | Fully configured OpenTelemetry [tracer](https://opentelemetry.io/docs/reference/specification/trace/api/#tracer) object that allows you to communicate with the user-defined trace backend service to share tracing data. For more information on how to use the tracer object see [Customize Function traces](../tutorials/01-100-customize-function-traces.md) | +| **extensions** | JSON object that can contain event payload, a Function's incoming request, or an outgoing response | + +### Event Object SDK + +The `event` object is extended by methods making some operations easier. You can use every method by providing `event.{FUNCTION_NAME(ARGUMENTS...)}`. 
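+For example, a Node.js Function can combine these methods to shape its HTTP response. The following is a minimal sketch; the methods it uses are described in the Node.js table below:
+
+```js
+module.exports = {
+  main: function (event, context) {
+    // Attach a custom header and a content type to the HTTP response.
+    event.setResponseHeader("x-function-name", context["function-name"]);
+    event.setResponseContentType("application/json");
+    // Return the 201 status code instead of the default 200.
+    event.setResponseStatus(201);
+    return JSON.stringify({ created: true });
+  }
+}
+```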
+
+
+#### **Node.js**
+
+| Method name | Arguments | Description |
+|---------------|-----------|-------------|
+| **setResponseHeader** | key, value | Sets a header to the `response` object based on the given key and value |
+| **setResponseContentType** | type | Sets the `ContentType` header to the `response` object based on the given type |
+| **setResponseStatus** | status | Sets the `response` status based on the given status |
+| **publishCloudEvent** | event | **Deprecated: use `emitCloudEvent` instead.** Publishes a CloudEvent on the publisher service based on the given CloudEvent object |
+| **buildResponseCloudEvent** | id, type, data | **Deprecated: use `emitCloudEvent` instead.** Builds a CloudEvent object based on the `request` CloudEvent object and the given arguments |
+| **emitCloudEvent** | type, source, data, optionalCloudEventAttribute | Builds a CloudEvent based on the arguments and emits it on the eventing publisher service. You can pass any additional [cloudevent attributes](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/formats/json-format.md#2-attributes) as properties of the last optional argument `optionalCloudEventAttribute` |
+
+#### **Python**
+
+| Method name | Arguments | Description |
+|----------|-----------|-------------|
+| **publishCloudEvent** | event | Publishes a CloudEvent on the publisher service based on the given CloudEvent object |
+| **buildResponseCloudEvent** | id, type, data | Builds a CloudEvent object based on the `request` CloudEvent object and the given arguments |
+
+
+
+### Context Object
+
+The `context` object contains information about the Function's invocation, such as runtime details, execution timeout, or memory limits.
+
+See sample context details:
+
+```json
+...
+{
+  "function-name": "main",
+  "timeout": 180,
+  "runtime": "nodejs20",
+  "memory-limit": "200Mi"
+}
+```
+
+See the detailed descriptions of these fields:
+
+| Field | Description |
+|-------|---------------------------------------------------------------------------------------------------------------------------------------------|
+| **function-name** | Name of the invoked Function |
+| **timeout** | Time, in seconds, after which the system cancels the request to invoke the Function |
+| **runtime** | Environment used to run the Function. You can use `nodejs18` (deprecated), `nodejs20`, `python39` (deprecated), or `python312`. |
+| **memory-limit** | Maximum amount of memory assigned to run a Function |
+
+## HTTP Requests
+
+You can use the **event.extensions.request** object to access properties and methods of a given request that vary depending on the runtime. For more information, read the API documentation for [Node.js Express](http://expressjs.com/en/api.html#req) and [Python](https://bottlepy.org/docs/dev/api.html#the-request-object).
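+For example, a Node.js Function can read the request properties exposed by Express. This is a minimal sketch:
+
+```js
+module.exports = {
+  main: function (event, context) {
+    // event.extensions.request is the Express request object in Node.js runtimes.
+    const req = event.extensions.request;
+    // Log the HTTP method and path of the incoming call.
+    console.log(`${req.method} ${req.path}`);
+    // Query parameters are available under req.query.
+    return `Hello ${req.query.name || "anonymous"}!`;
+  }
+}
+```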
+## Custom HTTP Responses
+
+By default, a failing Function simply throws an error to tell the Event Service to reinject the event at a later point. Such an HTTP-based Function returns the HTTP status code `500`. If you manage to invoke a Function successfully, the system returns the default HTTP status code `200`.
+
+Apart from these two default codes, you can define custom responses. Learn how to do that in Node.js and Python:
+
+
+
+#### **Node.js**
+
+To define custom responses in all Node.js runtimes, use the **event.extensions.response** object.
+
+This object is created by the Express framework and can be customized. For more information, read the [Node.js API documentation](https://nodejs.org/docs/latest-v12.x/api/http.html#http_class_http_serverresponse).
+
+This example shows how to set such a custom response in Node.js for the HTTP status code `400`:
+
+```js
+module.exports = {
+  main: function (event, context) {
+    if (event.extensions.request.query.id === undefined) {
+      const res = event.extensions.response;
+      res.status(400)
+      return
+    }
+    return "42"
+  }
+}
+```
+
+#### **Python**
+
+To define custom responses in all Python runtimes, use the **HTTPResponse** object available in Bottle.
+
+This object must be instantiated and can be customized. For more information, read the [Bottle API documentation](https://bottlepy.org/docs/dev/api.html#the-response-object).
+
+The following example shows how to set such a custom response in Python for the HTTP status code `400`:
+
+```python
+import json
+
+from bottle import HTTPResponse
+
+SUPPORTED_CONTENT_TYPES = ['application/json']
+
+def main(event, context):
+    request = event['extensions']['request']
+
+    response_content_type = 'application/json'
+    headers = {
+        'Content-Type': response_content_type
+    }
+
+    status = 202
+    response_payload = {'success': 'Message accepted.'}
+
+    if request.headers.get('Content-Type') not in SUPPORTED_CONTENT_TYPES:
+        status = 400
+        response_payload = json.dumps({'error': 'Invalid Content-Type.'})
+
+    return HTTPResponse(body=response_payload, status=status, headers=headers)
+```
+
+
+
+## Override Runtime Image
+
+You can use a custom runtime image to override the existing one. Your image must meet all the following requirements:
+
+- Expose the workload endpoint on the right port
+- Provide liveness and readiness check endpoints at `/healthz`
+- Fetch sources from the path under the `KUBELESS_INSTALL_VOLUME` environment variable
+- Support security. Kyma runtimes are secure by default; you only need to secure your own images accordingly.
+
+> [!NOTE]
+> For better understanding, you can look at the [main Docker files](../../../config/serverless/templates/runtimes.yaml). They are responsible for building the final image based on the `base_image` argument. As a user, you can override this argument, which is what [this tutorial](../tutorials/01-110-override-runtime-image.md) shows.
+
+Every container in the Function's Pods has the same system environment variables, which help you configure the Function's server. For more information, read the [Environment variables](05-20-env-variables.md) page.
diff --git a/docs/user/technical-reference/07-80-available-presets.md b/docs/user/technical-reference/07-80-available-presets.md
new file mode 100644
index 00000000..588eb8cb
--- /dev/null
+++ b/docs/user/technical-reference/07-80-available-presets.md
@@ -0,0 +1,35 @@
+# Available Presets
+
+The Function's resources and replicas, as well as the resources for image-building Jobs, are based on presets. A preset is a predefined group of values. There are two groups of presets defined for the Function CR. They include the presets for:
+
+- Function's resources
+- Image-building Job's resources
+
+## Usage
+
+If you want to apply values from a preset to a single Function, override the existing values for a given preset in the Function CR: first, remove the relevant fields from the Function CR, and then add the relevant preset labels.
+
+For example, to modify the default values for **buildResources**, remove all its entries from the Function CR and add an appropriate **serverless.kyma-project.io/build-resources-preset: {PRESET}** label to the Function CR.
+
+### Function's Resources
+
+| Preset | Request CPU | Request memory | Limit CPU | Limit memory |
+| - | - | - | - | - |
+| `XS` | `50m` | `64Mi` | `100m` | `128Mi` |
+| `S` | `100m` | `128Mi` | `200m` | `256Mi` |
+| `M` | `200m` | `256Mi` | `400m` | `512Mi` |
+| `L` | `400m` | `512Mi` | `800m` | `1024Mi` |
+| `XL` | `800m` | `1024Mi` | `1600m` | `2048Mi` |
+
+To apply values from a given preset, use the **serverless.kyma-project.io/function-resources-preset: {PRESET}** label in the Function CR.
+
+### Build Job's Resources
+
+| Preset | Request CPU | Request memory | Limit CPU | Limit memory |
+| - | - | - | - | - |
+| `local-dev` | `200m` | `200Mi` | `400m` | `400Mi` |
+| `slow` | `200m` | `200Mi` | `700m` | `700Mi` |
+| `normal` | `700m` | `700Mi` | `1100m` | `1100Mi` |
+| `fast` | `1100m` | `1100Mi` | `1700m` | `1700Mi` |
+
+To apply values from a given preset, use the **serverless.kyma-project.io/build-resources-preset: {PRESET}** label in the Function CR.
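+
+For example, a Function CR that uses both preset labels could look as follows. This is only a sketch; `my-function` and the chosen preset names are placeholders:
+
+```yaml
+apiVersion: serverless.kyma-project.io/v1alpha2
+kind: Function
+metadata:
+  name: my-function
+  labels:
+    # Presets for the Function's own resources and for its build Job:
+    serverless.kyma-project.io/function-resources-preset: "S"
+    serverless.kyma-project.io/build-resources-preset: "normal"
+spec:
+  runtime: nodejs20
+  source:
+    inline:
+      source: |
+        module.exports = {
+          main: function (event, context) {
+            return "Hello Serverless!"
+          }
+        }
+```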
diff --git a/docs/user/technical-reference/README.md b/docs/user/technical-reference/README.md
new file mode 100644
index 00000000..5226938c
--- /dev/null
+++ b/docs/user/technical-reference/README.md
@@ -0,0 +1,3 @@
+# Technical Reference
+
+In this section, you'll find the architecture documents, the configuration parameters, custom resources (CRs), and other useful references.
diff --git a/docs/user/troubleshooting-guides/03-10-cannot-build-functions.md b/docs/user/troubleshooting-guides/03-10-cannot-build-functions.md
new file mode 100644
index 00000000..077b0281
--- /dev/null
+++ b/docs/user/troubleshooting-guides/03-10-cannot-build-functions.md
@@ -0,0 +1,67 @@
+# Failure to Build Functions
+
+## Symptom
+
+You have issues when building your Function.
+
+## Cause
+
+In its default configuration, Serverless uses persistent volumes as the internal registry to store Docker images for Functions. The default storage size of such a volume is 20 GB. When this storage becomes full, you have issues building your Functions.
+
+## Remedy
+
+As a workaround, increase the default capacity up to a maximum of 100 GB by editing the `serverless-docker-registry` PersistentVolumeClaim (PVC) object on your cluster.
+
+1. Edit the `serverless-docker-registry` PVC:
+
+   ```bash
+   kubectl edit pvc -n kyma-system serverless-docker-registry
+   ```
+
+2. Change the value of **spec.resources.requests.storage** to a higher one, such as 30 GB, to increase the PVC capacity:
+
+   ```yaml
+   ...
+   spec:
+     resources:
+       requests:
+         storage: 30Gi
+   ```
+
+3. Save the changes and wait for a few minutes. Use this command to check if the **CAPACITY** of the `serverless-docker-registry` PVC has changed as expected:
+
+   ```bash
+   kubectl get pvc serverless-docker-registry -n kyma-system
+   ```
+
+   You should get the following result:
+
+   ```bash
+   NAME                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+   serverless-docker-registry   Bound    pvc-a69b96hc-ahbc-k85d-0gh6-19gkcr4yns4k   30Gi       RWO            standard       23d
+   ```
+
+If the value of the storage does not change, restart the Pod to which this PVC is bound to finish the volume resize.
+
+To do this, follow these steps:
+
+1. List all available Pods in the `kyma-system` namespace:
+
+   ```bash
+   kubectl get pods -n kyma-system
+   ```
+
+2. Search for the Pod with the `serverless-docker-registry-{UNIQUE_ID}` name and delete it. See the example below:
+
+   ```bash
+   kubectl delete pod serverless-docker-registry-6869bd57dc-9tqxp -n kyma-system
+   ```
+
+   > [!WARNING]
+   > Do not remove Pods named `serverless-docker-registry-self-signed-cert-{UNIQUE_ID}`.
+
+3. Search for the `serverless-docker-registry` PVC again to check that the capacity was resized:
+
+   ```bash
+   kubectl get pvc serverless-docker-registry -n kyma-system
+   ```
diff --git a/docs/user/troubleshooting-guides/03-20-failing-function-container.md b/docs/user/troubleshooting-guides/03-20-failing-function-container.md
new file mode 100644
index 00000000..6f969271
--- /dev/null
+++ b/docs/user/troubleshooting-guides/03-20-failing-function-container.md
@@ -0,0 +1,16 @@
+# Failing Function Container
+
+## Symptom
+
+The container suddenly fails when you use the `kyma run function` command with these flags:
+
+- `runtime=nodejs20`
+- `debug=true`
+- `hot-deploy=true`
+
+In such a case, you can see the `[nodemon] app crashed` message in the container's logs.
+
+## Remedy
+
+If you use Kyma in Kubernetes, Kubernetes itself should restart the failing container.
+If you use Kyma without Kubernetes, you must rerun the container yourself.
diff --git a/docs/user/troubleshooting-guides/03-40-function-build-failing-k3d.md b/docs/user/troubleshooting-guides/03-40-function-build-failing-k3d.md
new file mode 100644
index 00000000..aee50fe9
--- /dev/null
+++ b/docs/user/troubleshooting-guides/03-40-function-build-failing-k3d.md
@@ -0,0 +1,52 @@
+# Functions Failing to Build on k3d
+
+## Symptom
+
+In rare cases, for some k3d versions and configurations, Functions fail to be built.
+
+Your Function cannot be built and you get the following output:
+
+   ```bash
+   $ kubectl get functions.serverless.kyma-project.io myfun
+   NAME    CONFIGURED   BUILT   RUNNING   RUNTIME    VERSION   AGE
+   myfun   True         False             nodejs20   1         3h15m
+   ```
+
+Additionally, the Function build job shows the following error, meaning that your host k3d environment is likely to experience the problem:
+
+   ```bash
+   $ kubectl logs myfun-build-zqhk8-7xl6h
+   kaniko should only be run inside of a container, run with the --force flag if you are sure you want to continue
+   ```
+
+## Cause
+
+This problem originates in [kaniko](https://github.com/GoogleContainerTools/kaniko) - the container image build tool used in Kyma. kaniko is designed to be run in a container, and this is how it is executed in Kyma (as build jobs).
+kaniko has a detection mechanism to verify whether the build is actually executed in a container, and it fails if it is not.
+This detection mechanism has issues, and in some circumstances (that is, on hosts with cgroups in version 2 or other, not yet clearly identified configurations) it shows a false positive result.
+
+Related issues:
+
+- https://github.com/kyma-project/kyma/issues/13051
+- https://github.com/GoogleContainerTools/kaniko/issues/1592
+
+## Remedy
+
+You can force kaniko to skip the verification by overriding the kaniko execution arguments with the `--force` flag.
+
+Introduce a file with overrides, for example `my-overrides.yaml`:
+
+   ```yaml
+   serverless:
+     containers:
+       manager:
+         envs:
+           functionBuildExecutorArgs:
+             value: --insecure,--skip-tls-verify,--skip-unused-stages,--log-format=text,--cache=true,--use-new-run,--compressed-caching=false,--force
+   ```
+
+Use the file to override the default configuration while deploying Kyma on your k3d instance:
+
+   ```bash
+   kyma deploy --values-file my-overrides.yaml
+   ```
diff --git a/docs/user/troubleshooting-guides/03-50-serverless-periodically-restaring.md b/docs/user/troubleshooting-guides/03-50-serverless-periodically-restaring.md
new file mode 100644
index 00000000..029a6ab1
--- /dev/null
+++ b/docs/user/troubleshooting-guides/03-50-serverless-periodically-restaring.md
@@ -0,0 +1,13 @@
+# Serverless Periodically Restarting
+
+## Symptom
+
+When reconciling Git-sourced Functions, Serverless restarts every 10 minutes.
+
+## Cause
+
+Function Controller polls for changes in the referenced Git repositories. If you have a lot of Git-sourced Functions and they were deployed together at approximately the same time, their Git sources are checked out in a synchronized pulse (every 10 minutes). If you happen to reference large repositories (multi-repositories), there is a rhythmic, high demand on the CPU and I/O resources necessary to check out the repositories. This may cause Function Controller to crash and restart.
+
+## Remedy
+
+Avoid using multi-repositories or large repositories to source your Git Functions. Using small, dedicated Function repositories decreases the CPU and I/O resources used to check out Git sources, and hence improves the stability of Function Controller.
diff --git a/docs/user/troubleshooting-guides/README.md b/docs/user/troubleshooting-guides/README.md
new file mode 100644
index 00000000..7c26f104
--- /dev/null
+++ b/docs/user/troubleshooting-guides/README.md
@@ -0,0 +1,5 @@
+# Troubleshooting
+
+The troubleshooting guides aim to identify the most common recurring problems the users face, as well as the most suitable solutions to these problems.
+
+If you can't find a solution, don't hesitate to create a [GitHub](https://github.com/kyma-project/kyma/issues) issue or reach out to our [Slack channel](https://kyma-community.slack.com/) to get direct support from the community.
diff --git a/docs/user/tutorials/01-10-create-inline-function.md b/docs/user/tutorials/01-10-create-inline-function.md
new file mode 100644
index 00000000..bb7bacb6
--- /dev/null
+++ b/docs/user/tutorials/01-10-create-inline-function.md
@@ -0,0 +1,147 @@
+# Create and Modify an Inline Function
+
+This tutorial shows how you can create a simple "Hello World" Function in Node.js. The Function's code and dependencies are defined as inline code in the Function's **spec**.
+
+Serverless also allows you to store the Function's code and dependencies as sources in a Git repository. To learn more, read how to [Create a Git Function](01-11-create-git-function.md).
+To learn more about the Function's signature, the `event` and `context` objects, and custom HTTP responses the Function returns, read [Function's specification](../technical-reference/07-70-function-specification.md).
+
+> [!NOTE]
+> Read about [Istio sidecars in Kyma and why you want them](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-03-istio-sidecars-in-kyma/). Then, check how to [enable automatic Istio sidecar proxy injection](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/smsh-01-istio-enable-sidecar-injection/). For more details, see [Default Istio setup in Kyma](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-02-default-istio-setup-in-kyma/).
+
+## Steps
+
+You can create a Function with Kyma dashboard, Kyma CLI, or kubectl:
+
+
+
+#### **Kyma Dashboard**
+
+> [!NOTE]
+> Kyma dashboard uses Busola, which is not installed by default. Follow the [installation instructions](https://github.com/kyma-project/busola/blob/main/docs/install-kyma-dashboard-manually.md).
+
+1. Create a namespace or select one from the drop-down list in the top navigation panel.
+
+2. Go to **Workloads** > **Functions** and select **Create Function**.
+
+3. In the dialog box, provide the Function's name or click **Generate**.
+
+> [!NOTE]
+> The **Node.js Function** preset is selected by default. It means that the selected runtime is `Node.js`, and the **Source** code is autogenerated. You can choose the Python runtime by clicking the **Choose preset** button.
+
+   ```js
+   module.exports = {
+     main: async function (event, context) {
+       const message =
+         `Hello World` +
+         ` from the Kyma Function ${context['function-name']}` +
+         ` running on ${context.runtime}!`;
+       console.log(message);
+       return message;
+     },
+   };
+   ```
+
+The dialog box closes. Wait for the **Status** field to change to `RUNNING`, confirming that the Function was created successfully.
+
+4. If you decide to modify the Function, click **Edit** and confirm the changes afterward by selecting the **Update** button. A message at the bottom of the screen confirms that the Function was updated.
+
+#### **Kyma CLI**
+
+1. Export these variables:
+
+   ```bash
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   ```
+
+2. Create your local development workspace.
+
+   a. Create a new folder to keep the Function's code and configuration in one place:
+
+   ```bash
+   mkdir {FOLDER_NAME}
+   cd {FOLDER_NAME}
+   ```
+
+   b. Create initial scaffolding for the Function:
+
+   ```bash
+   kyma init function --name $NAME --namespace $NAMESPACE
+   ```
+
+3. Code and configure.
+
+   Open the workspace in your favorite IDE. If you have Visual Studio Code installed, run the following command from the terminal in your workspace folder:
+
+   ```bash
+   code .
+   ```
+
+   It's time to inspect the code and the `config.yaml` file. Feel free to adjust the "Hello World" sample code.
+
+4. Deploy and verify.
+
+   a. Call the `apply` command from the workspace folder. It builds the container and runs it on the Kyma runtime pointed to by your current KUBECONFIG file:
+
+   ```bash
+   kyma apply function
+   ```
+
+   b. Check if your Function was created successfully:
+
+   ```bash
+   kubectl get functions $NAME -n $NAMESPACE
+   ```
+
+   You should get a result similar to this example:
+
+   ```bash
+   NAME            CONFIGURED   BUILT   RUNNING   RUNTIME    VERSION   AGE
+   test-function   True         True    True      nodejs20   1         96s
+   ```
+
+#### **kubectl**
+
+1. Export these variables:
+
+   ```bash
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   ```
+
+2. Create a Function CR that specifies the Function's logic:
+
+   ```bash
+   cat <
+   ```
diff --git a/docs/user/tutorials/01-100-customize-function-traces.md b/docs/user/tutorials/01-100-customize-function-traces.md
new file mode 100644
index 00000000..b8ede922
--- /dev/null
+++ b/docs/user/tutorials/01-100-customize-function-traces.md
@@ -0,0 +1,100 @@
+# Customize Function Traces
+
+This tutorial shows how to use the built-in OpenTelemetry tracer object to send custom trace data to the trace backend.
+
+Kyma Functions are instrumented to handle trace headers. This means that every time you call your Function, the executed logic is traceable using a dedicated span visible in the trace backend (that is, start time and duration).
+Additionally, you can extend the default trace context and create your own custom spans as you wish (that is, when calling a remote service in your distributed application) or add additional information to the tracing context by introducing events and tags. The following tutorial shows you how to do it using the tracer client that is available as part of the [event](../technical-reference/07-70-function-specification.md#event-object) object.
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Telemetry component installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/02-install-kyma/#install-specific-components)
+- [Trace pipeline configured](https://github.com/kyma-project/telemetry-manager/blob/main/docs/user/03-traces.md#setting-up-a-tracepipeline)
+
+## Steps
+
+The following code samples illustrate how to enrich the default trace with custom spans, events, and tags:
+
+1. [Create an inline Function](01-10-create-inline-function.md) with the following body:
+
+
+
+#### **Node.js**
+
+   ```javascript
+
+   const { SpanStatusCode } = require("@opentelemetry/api/build/src/trace/status");
+   const axios = require("axios")
+   module.exports = {
+      main: async function (event, context) {
+
+         const data = {
+            name: "John",
+            surname: "Doe",
+            type: "Employee",
+            id: "1234-5678"
+         }
+
+         const span = event.tracer.startSpan('call-to-acme-service');
+         return await callAcme(data)
+            .then(resp => {
+               if(resp.status!==200){
+                  throw new Error("Unexpected response from acme service");
+               }
+               span.addEvent("Data sent");
+               span.setAttribute("data-type", data.type);
+               span.setAttribute("data-id", data.id);
+               span.setStatus({code: SpanStatusCode.OK});
+               return "Data sent";
+            }).catch(err=> {
+               console.error(err)
+               span.setStatus({
+                  code: SpanStatusCode.ERROR,
+                  message: err.message,
+               });
+               return err.message;
+            }).finally(()=>{
+               span.end();
+            });
+      }
+   }
+
+   let callAcme = (data)=>{
+      return axios.post('https://acme.com/api/people', data)
+   }
+   ```
+
+#### **Python**
+
+   [OpenTelemetry SDK](https://opentelemetry.io/docs/instrumentation/python/manual/#traces) allows you to customize trace spans and events.
+
+   ```python
+   import requests
+   import time
+
+   def main(event, context):
+       # Create a new span to track some work
+       with event.tracer.start_as_current_span("parent"):
+           time.sleep(1)
+
+           # Create a nested span to track nested work
+           with event.tracer.start_as_current_span("child"):
+               time.sleep(2)
+               # the nested span is closed when it's out of scope
+
+           # Now the parent span is the current span again
+           time.sleep(1)
+
+           # This span is also closed when it goes out of scope
+
+       # This request will be auto-instrumented
+       r = requests.get('https://swapi.dev/api/people/2')
+       return r.json()
+   ```
+
+
+
+2. [Expose your Function](01-20-expose-function.md).
+
+3. Find the traces for the Function in the trace backend.
diff --git a/docs/user/tutorials/01-11-create-git-function.md b/docs/user/tutorials/01-11-create-git-function.md
new file mode 100644
index 00000000..808ceeba
--- /dev/null
+++ b/docs/user/tutorials/01-11-create-git-function.md
@@ -0,0 +1,148 @@
+# Create a Git Function
+
+This tutorial shows how you can build a Function from code and dependencies stored in a Git repository, which is an alternative to keeping the code in the Function CR. The tutorial is based on the Function from the [`orders service` example](https://github.com/kyma-project/examples/tree/main/orders-service). It describes the steps required to fetch the Function's source code and dependencies from a public Git repository that does not need any authentication method. However, it also provides additional guidance on how to secure the setup if you are using a private repository.
+
+To learn more about Git repository sources for Functions and different ways of securing your repository, read about the [Git source type](../technical-reference/07-40-git-source-type.md).
+
+> [!NOTE]
+> Read about [Istio sidecars in Kyma and why you want them](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-03-istio-sidecars-in-kyma/). Then, check how to [enable automatic Istio sidecar proxy injection](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/smsh-01-istio-enable-sidecar-injection/). For more details, see [Default Istio setup in Kyma](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-02-default-istio-setup-in-kyma/).
+
+## Steps
+
+You can create a Function either with kubectl or Kyma dashboard:
+
+
+
+#### **Kyma Dashboard**
+
+> [!NOTE]
+> Kyma dashboard uses Busola, which is not installed by default. Follow the [installation instructions](https://github.com/kyma-project/busola/blob/main/docs/install-kyma-dashboard-manually.md).
+
+1. Create a namespace or select one from the drop-down list in the top navigation panel.
+
+2. Create a Secret (optional).
+
+   If you use a secured repository, you must first create a Secret with either basic (username and password or token) or SSH key authentication to this repository in the same namespace as the Function. To do that, follow these sub-steps:
+
+   - Open your namespace view. In the left navigation panel, go to **Configuration** > **Secrets** and select the **Create Secret** button.
+
+   - Open the **Advanced** view and enter the Secret name and type.
+
+   - Under **Data**, enter these key-value pairs with credentials:
+
+     - Basic authentication: `username: {USERNAME}` and `password: {PASSWORD_OR_TOKEN}`
+
+     - SSH key: `key: {SSH_KEY}`
+
+     > [!NOTE]
+     > Read more about the [supported authentication methods](../technical-reference/07-40-git-source-type.md).
+
+   - Confirm by selecting **Create**.
+
+3. To connect the repository, go to **Workloads** > **Functions** > **Create Function**.
+
+4. Provide or generate the Function's name.
+
+5. Go to **Advanced** and change **Source Type** from **Inline** to **Git Repository**.
+
+6. Choose `JavaScript` from the **Language** dropdown and select the proper runtime.
+
+7. Click the **Git Repository** section and enter the following values:
+   - Repository **URL**: `https://github.com/kyma-project/examples.git`
+   - **Base Dir**: `orders-service/function`
+   - **Reference**: `main`
+
+   > [!NOTE]
+   > If you want to connect a secured repository instead of a public one, toggle the **Auth** switch. In the **Auth** section, select the Secret from the list and choose the preferred type.
+
+8. Click **Create**.
+
+   After a while, a message confirms that the Function has been created.
+   Make sure that the new Function has the `RUNNING` status.
+
+#### **kubectl**
+
+1. Export these variables:
+
+   ```bash
+   export GIT_FUNCTION={GIT_FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   ```
+
+2. Create a Secret (optional).
+
+   If you use a secured repository, follow the sub-steps for the basic or SSH key authentication:
+
+   - Basic authentication (username and password or token) to this repository in the same namespace as the Function:
+
+     1. Generate a [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic) and copy it.
+     2. Create a Secret containing your username and the generated token:
+
+        ```bash
+        kubectl -n $NAMESPACE create secret generic git-creds-basic --from-literal=username={GITHUB_USERNAME} --from-literal=password={GENERATED_PERSONAL_TOKEN}
+        ```
+
+   - SSH key:
+
+     1. Generate a new SSH key pair (private and public). Follow [this tutorial](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) to learn how to do it. Alternatively, you can use an existing pair.
+     2. Install the generated private key in Kyma as a Kubernetes Secret that lives in the same namespace as your Function:
+
+        ```bash
+        kubectl -n $NAMESPACE create secret generic git-creds-ssh --from-file=key={PATH_TO_THE_FILE_WITH_PRIVATE_KEY}
+        ```
+
+     3. Configure the public key in GitHub. Follow the steps described in [this tutorial](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account).
+
+   > [!NOTE]
+   > Read more about the [supported authentication methods](../technical-reference/07-40-git-source-type.md).
+
+3. Create a Function CR that specifies the Function's logic and points to the directory with code and dependencies in the given repository. It also specifies the Git repository metadata:
+
+   ```bash
+   cat <
+   ```
+
+   > [!NOTE]
+   > If you use a secured repository, add the **auth** object with the appropriate **type** and **secretName** fields to the spec under **gitRepository**:
+
+   ```yaml
+   gitRepository:
+     ...
+     auth:
+       type: # "basic" or "key"
+       secretName: # "git-creds-basic" or "git-creds-ssh"
+   ```
+
+   > [!NOTE]
+   > To avoid performance degradation caused by large Git repositories and large monorepos, [Function Controller](../resources/06-10-function-cr.md#related-resources-and-components) implements a configurable backoff period for the source checkout, based on `APP_FUNCTION_REQUEUE_DURATION`. If you want to allow the controller to perform the source checkout with every reconciliation loop, disable the backoff period by marking the Function CR with the annotation `serverless.kyma-project.io/continuousGitCheckout: true`.
+
+   > [!NOTE]
+   > See this [Function's code and dependencies](https://github.com/kyma-project/examples/tree/main/orders-service).
+
+4. Check if your Function was created and all conditions are set to `True`:
+
+   ```bash
+   kubectl get functions $GIT_FUNCTION -n $NAMESPACE
+   ```
+
+   You should get a result similar to this example:
+
+   ```bash
+   NAME            CONFIGURED   BUILT   RUNNING   RUNTIME    VERSION   AGE
+   test-function   True         True    True      nodejs20   1         96s
+   ```
+
+
diff --git a/docs/user/tutorials/01-110-override-runtime-image.md b/docs/user/tutorials/01-110-override-runtime-image.md
new file mode 100644
index 00000000..a469e419
--- /dev/null
+++ b/docs/user/tutorials/01-110-override-runtime-image.md
@@ -0,0 +1,88 @@
+# Override Runtime Image
+
+This tutorial shows how to build a custom runtime image and override the Function's base image with it.
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Serverless module installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/08-install-uninstall-upgrade-kyma-module/) in a cluster
+
+## Steps
+
+Follow these steps:
+
+1. Follow [this example](https://github.com/kyma-project/serverless/tree/main/examples/custom-serverless-runtime-image) to build a custom Python runtime image.
+
+
+
+#### **Kyma CLI**
+
+2. Export these variables:
+
+   ```bash
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   export RUNTIME_IMAGE={RUNTIME_IMAGE_WITH_TAG}
+   ```
+
+3. Create your local development workspace using the built image:
+
+   ```bash
+   mkdir {FOLDER_NAME}
+   cd {FOLDER_NAME}
+   kyma init function --name $NAME --namespace $NAMESPACE --runtime-image-override $RUNTIME_IMAGE --runtime python312
+   ```
+
+4. Deploy your Function:
+
+   ```bash
+   kyma apply function
+   ```
+
+5. Verify whether your Function is running:
+
+   ```bash
+   kubectl get functions $NAME -n $NAMESPACE
+   ```
+
+#### **kubectl**
+
+2. Export these variables:
+
+   ```bash
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   export RUNTIME_IMAGE={RUNTIME_IMAGE_WITH_TAG}
+   ```
+
+3. Create a Function CR that specifies the Function's logic:
+
+   ```bash
+   cat <
+   ```
diff --git a/docs/user/tutorials/01-120-inject-envs.md b/docs/user/tutorials/01-120-inject-envs.md
new file mode 100644
index 00000000..abe22e6c
--- /dev/null
+++ b/docs/user/tutorials/01-120-inject-envs.md
@@ -0,0 +1,147 @@
+# Inject Environment Variables
+
+This tutorial shows how to inject environment variables into a Function.
+
+You can specify environment variables in the Function definition, or define references to Kubernetes Secrets or ConfigMaps.
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Serverless module installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/08-install-uninstall-upgrade-kyma-module/) in a cluster
+
+## Steps
+
+Follow these steps:
+
+1. Create your ConfigMap:
+
+```bash
+kubectl create configmap my-config --from-literal config-env="I come from config map"
+```
+
+2. Create your Secret:
+
+```bash
+kubectl create secret generic my-secret --from-literal secret-env="I come from secret"
+```
+
+
+
+#### **Kyma CLI**
+
+3. Generate the Function's configuration and sources:
+
+   ```bash
+   kyma init function --name my-function
+   ```
+
+4. Define environment variables as part of the Function configuration file. Modify `config.yaml` with the following:
+
+   ```yaml
+   name: my-function
+   namespace: default
+   runtime: nodejs20
+   source:
+     sourceType: inline
+   env:
+     - name: env1
+       value: "I come from function definition"
+     - name: env2
+       valueFrom:
+         configMapKeyRef:
+           name: my-config
+           key: config-env
+     - name: env3
+       valueFrom:
+         secretKeyRef:
+           name: my-secret
+           key: secret-env
+   ```
+
+5. Use the injected environment variables in the handler file. Modify `handler.js` with the following:
+
+   ```js
+   module.exports = {
+     main: function (event, context) {
+       const envs = ["env1", "env2", "env3"]
+       envs.forEach(function(key){
+         console.log(`${key}:${readEnv(key)}`)
+       });
+       return 'Hello Serverless'
+     }
+   }
+
+   readEnv = (envKey) => {
+     if(envKey){
+       return process.env[envKey];
+     }
+     return
+   }
+   ```
+
+6. Deploy your Function:
+
+   ```bash
+   kyma apply function
+   ```
+
+7. Verify whether your Function is running:
+
+   ```bash
+   kubectl get functions my-function
+   ```
+
+#### **kubectl**
+
+3. Create a Function CR that specifies the Function's logic:
+
+   ```bash
+   cat <<EOF | kubectl apply -f -
+   apiVersion: serverless.kyma-project.io/v1alpha2
+   kind: Function
+   metadata:
+     name: my-function
+   spec:
+     runtime: nodejs20
+     env:
+       - name: env1
+         value: "I come from function definition"
+       - name: env2
+         valueFrom:
+           configMapKeyRef:
+             name: my-config
+             key: config-env
+       - name: env3
+         valueFrom:
+           secretKeyRef:
+             name: my-secret
+             key: secret-env
+     source:
+       inline:
+         source: |
+           module.exports = {
+             main: function (event, context) {
+               const envs = ["env1", "env2", "env3"]
+               envs.forEach(function(key){
+                 console.log(\`\${key}:\${readEnv(key)}\`)
+               });
+               return 'Hello Serverless'
+             }
+           }
+
+           readEnv = (envKey) => {
+             if(envKey){
+               return process.env[envKey];
+             }
+             return
+           }
+   EOF
+   ```
+
+4. Verify whether your Function is running:
+
+   ```bash
+   kubectl get functions my-function
+   ```
+
+
diff --git a/docs/user/tutorials/01-130-use-external-scalers.md b/docs/user/tutorials/01-130-use-external-scalers.md
new file mode 100644
index 00000000..19893376
--- /dev/null
+++ b/docs/user/tutorials/01-130-use-external-scalers.md
@@ -0,0 +1,195 @@
+# Use External Scalers
+
+This tutorial shows how to use an external resource scaler, for example, HorizontalPodAutoscaler (HPA) or Keda's ScaledObject, with a Serverless Function.
+
+Keep in mind that Serverless Functions implement the [scale subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource), which means that you can use any Kubernetes-based scaler.
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Keda module enabled](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/08-install-uninstall-upgrade-kyma-module/)
+
+## Steps
+
+Follow these steps:
+
+
+
+#### **HPA**
+
+1. Create your Function with the `replicas` value set to 1, to prevent the internal Serverless HPA creation:
+
+   ```bash
+   cat <
+   ```
+
+   > [!NOTE]
+   > This tutorial uses the `cpu` trigger because of its simple configuration. If you want to use another trigger, check the official [list of supported triggers](https://keda.sh/docs/scalers/).
+
+3. After a few seconds, ScaledObject should be up to date and contain information about the actual replicas:
+
+   ```bash
+   kubectl get scaledobject scaled-function
+   ```
+
+   You should get a result similar to this example:
+
+   ```bash
+   NAME              SCALETARGETKIND                                SCALETARGETNAME   MIN   MAX   TRIGGERS   AUTHENTICATION   READY   ACTIVE   FALLBACK   AGE
+   scaled-function   serverless.kyma-project.io/v1alpha2.Function   scaled-function   5     10    cpu                         True    True     Unknown    4m15s
+   ```
+
+#### **Keda Prometheus**
+
+1. Create your Function with the **replicas** value set to `1` to prevent the internal Serverless HPA creation:
+
+   ```bash
+   cat <
+   ```
+
+   > [!NOTE]
+   > This tutorial uses the `prometheus` trigger because of its simple configuration. If you want to use another trigger, check the official [list of supported triggers](https://keda.sh/docs/scalers/).
+
+3. After a few seconds, ScaledObject should be up to date and contain information about the actual replicas:
+
+   ```bash
+   kubectl get scaledobject scaled-function
+   ```
+
+   You should get a result similar to this example:
+
+   ```bash
+   NAME              SCALETARGETKIND                                SCALETARGETNAME   MIN   MAX   TRIGGERS     AUTHENTICATION   READY   ACTIVE   FALLBACK   AGE
+   scaled-function   serverless.kyma-project.io/v1alpha2.Function   scaled-function   1     5     prometheus                    True    True     Unknown    4m15s
+   ```
+
+Check out this [example](https://github.com/kyma-project/keda-manager/tree/main/examples/scale-to-zero-with-keda) to see how to use Kyma Serverless and Eventing in combination with Keda to accomplish scaling to zero.
+
+
diff --git a/docs/user/tutorials/01-140-use-secret-mounts.md b/docs/user/tutorials/01-140-use-secret-mounts.md
new file mode 100644
index 00000000..c0a5de59
--- /dev/null
+++ b/docs/user/tutorials/01-140-use-secret-mounts.md
@@ -0,0 +1,123 @@
+# Access to Secrets Mounted as Volume
+
+This tutorial shows how to use Secrets mounted as a volume with a Serverless Function.
+It's based on a simple Function in Python 3.9. The Function reads data from the Secret and returns it.
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Serverless module installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/08-install-uninstall-upgrade-kyma-module/) in a cluster
+
+## Steps
+
+Follow these steps:
+
+1. Export these variables:
+
+   ```bash
+   export FUNCTION_NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   export DOMAIN={DOMAIN_NAME}
+
+   export SECRET_NAME={SECRET_NAME}
+   export SECRET_DATA_KEY={SECRET_DATA_KEY}
+   export SECRET_MOUNT_PATH={SECRET_MOUNT_PATH}
+   ```
+
+2. Create a Secret:
+
+   ```bash
+   kubectl -n $NAMESPACE create secret generic $SECRET_NAME \
+       --from-literal=$SECRET_DATA_KEY={SECRET_DATA_VALUE}
+   ```
+
+3. Create your Function with **secretMounts**:
+
+   ```bash
+   cat <
+   ```
+
+   > [!NOTE]
+   > Read more about [creating Functions](01-10-create-inline-function.md).
+
+4. Create an APIRule. The following steps allow you to test the Function in action:
+
+   ```bash
+   cat <
+   ```
+
+   > [!NOTE]
+   > Read more about [exposing Functions](01-20-expose-function.md).
+
+5. Call the Function:
+
+   ```bash
+   curl https://$FUNCTION_NAME.$DOMAIN
+   ```
+
+   You should get `{SECRET_DATA_VALUE}` as a result.
+
+6. Next steps:
+
+   Now you can edit the Secret and see if the Function returns the new value from the Secret.
+
+   To edit your Secret, use:
+
+   ```bash
+   kubectl -n $NAMESPACE edit secret $SECRET_NAME
+   ```
+
+   To encode values used in `data` from the Secret, use `base64`, for example:
+
+   ```bash
+   echo -n '{NEW_SECRET_DATA_VALUE}' | base64
+   ```
+
+   Calling the Function again (using `curl`) must return `{NEW_SECRET_DATA_VALUE}`.
+   Note that the Secret propagation may take some time, and the call may initially return the old value.
diff --git a/docs/user/tutorials/01-20-expose-function.md b/docs/user/tutorials/01-20-expose-function.md
new file mode 100644
index 00000000..053fff64
--- /dev/null
+++ b/docs/user/tutorials/01-20-expose-function.md
@@ -0,0 +1,163 @@
+# Expose a Function with an API Rule
+
+This tutorial shows how you can expose your Function to access it outside the cluster, through an HTTP proxy. To expose it, use an [APIRule custom resource (CR)](https://kyma-project.io/docs/kyma/latest/05-technical-reference/00-custom-resources/apix-01-apirule/). The API Gateway Controller reacts to an instance of the APIRule CR and, based on its details, creates an Istio VirtualService and Oathkeeper Access Rules that specify your permissions for the exposed Function.
+
+When you complete this tutorial, you get a Function that:
+
+- Is available on an unsecured endpoint (**handler** set to `noop` in the APIRule CR).
+- Accepts the `GET`, `POST`, `PUT`, and `DELETE` methods.
+
+To learn more about securing your Function, see the [Expose and secure a workload with OAuth2](https://kyma-project.io/docs/kyma/latest/03-tutorials/00-api-exposure/apix-05-expose-and-secure-a-workload/apix-05-01-expose-and-secure-workload-oauth2/) or [Expose and secure a workload with JWT](https://kyma-project.io/docs/kyma/latest/03-tutorials/00-api-exposure/apix-05-expose-and-secure-a-workload/apix-05-03-expose-and-secure-workload-jwt/) tutorials.
+
+Read also about [Function's specification](../technical-reference/07-70-function-specification.md) if you are interested in its signature, the `event` and `context` objects, and custom HTTP responses the Function returns.
+
+## Prerequisites
+
+- [Existing Function](01-10-create-inline-function.md)
+- [API Gateway component installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/02-install-kyma/#install-specific-components)
+
+## Steps
+
+You can expose a Function with Kyma dashboard, Kyma CLI, or kubectl:
+
+
+
+#### **Kyma Dashboard**
+
+> [!NOTE]
+> Kyma dashboard uses Busola, which is not installed by default. Follow the [installation instructions](https://github.com/kyma-project/busola/blob/main/docs/install-kyma-dashboard-manually.md).
+
+1. Select a namespace from the drop-down list in the top navigation panel. Make sure the namespace includes the Function that you want to expose through an APIRule.
+
+2. Go to **Discovery and Network** > **API Rules**, and click **Create API Rule**.
+
+3. Enter the following information:
+
+   - The APIRule's **Name** matching the Function's name.
+
+   > [!NOTE]
+   > The APIRule CR can have a name different from that of the Function, but it is recommended that all related resources share a common name.
+
+   - **Service Name** matching the Function's name.
+
+   - **Host** to determine the host on which you want to expose your Function. You must change the `*` symbol at the beginning to the subdomain name you want.
+
+4. In the **Rules > Access Strategies > Config** section, change the handler from `allow` to `noop` and select all the methods below.
+
+5. Select **Create** to confirm your changes.
+
+6. Check if you can access the Function by selecting the HTTPS link under the **Host** column for the newly created APIRule.
+
+#### **Kyma CLI**
+
+1. Export these variables:
+
+   ```bash
+   export DOMAIN={DOMAIN_NAME}
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={NAMESPACE_NAME}
+   ```
+
+   > [!NOTE]
+   > The Function takes its name from the Function CR name. The APIRule CR can have a different name, but for the purpose of this tutorial, all related resources share a common name defined under the **NAME** variable.
+
+2. Download the latest configuration of the Function from the cluster. This way, you update the local `config.yaml` file with the Function's code:
+
+   ```bash
+   kyma sync function $NAME -n $NAMESPACE
+   ```
+
+3. Edit the local `config.yaml` file and add the **apiRules** schema for the Function at the end of the file:
+
+   ```yaml
+   apiRules:
+     - name: {FUNCTION_NAME}
+       service:
+         host: {FUNCTION_NAME}.{DOMAIN_NAME}
+       rules:
+         - methods:
+             - GET
+             - POST
+             - PUT
+             - DELETE
+           accessStrategies:
+             - handler: noop
+   ```
+
+4. Apply the new configuration to the cluster:
+
+   ```bash
+   kyma apply function
+   ```
+
+5. Check if the Function's code was pushed to the cluster and reflects the local configuration:
+
+   ```bash
+   kubectl get functions $NAME -n $NAMESPACE
+   ```
+
+6. Check that the APIRule was created successfully and has the status `OK`:
+
+   ```bash
+   kubectl get apirules $NAME -n $NAMESPACE -o=jsonpath='{.status.APIRuleStatus.code}'
+   ```
+
+7. Call the Function's external address:
+
+   ```bash
+   curl https://$NAME.$DOMAIN
+   ```
+
+#### **kubectl**
+
+1. Export these variables:
+
+   ```bash
+   export DOMAIN={DOMAIN_NAME}
+   export NAME={FUNCTION_NAME}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   ```
+
+   > [!NOTE]
+   > The Function takes its name from the Function CR name. The APIRule CR can have a different name, but for the purpose of this tutorial, all related resources share a common name defined under the **NAME** variable.
+
+2. Create an APIRule CR for your Function. It is exposed on port `80`, which is the default port of the [Service Placeholder](../technical-reference/04-10-architecture.md):
+
+   ```bash
+   cat <
+   ```
\ No newline at end of file
diff --git a/docs/user/tutorials/01-30-manage-functions-with-kyma-cli.md b/docs/user/tutorials/01-30-manage-functions-with-kyma-cli.md
new file mode 100644
index 00000000..d6ef5249
--- /dev/null
+++ b/docs/user/tutorials/01-30-manage-functions-with-kyma-cli.md
@@ -0,0 +1,111 @@
+# Manage Functions with Kyma CLI
+
+This tutorial shows how to use the available CLI commands to manage Functions in Kyma. You will see how to:
+
+1. Create local files that contain the basic configuration for a sample "Hello World" Python Function (`kyma init function`).
+2. Generate a Function custom resource (CR) from these files and apply it on your cluster (`kyma apply function`).
+3. Fetch the current state of your Function's cluster configuration after it was modified (`kyma sync function`).
+
+> [!NOTE]
+> Read about [Istio sidecars in Kyma and why you want them](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-03-istio-sidecars-in-kyma/). Then, check how to [enable automatic Istio sidecar proxy injection](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/smsh-01-istio-enable-sidecar-injection/). For more details, see [Default Istio setup in Kyma](https://kyma-project.io/docs/kyma/latest/01-overview/service-mesh/smsh-02-default-istio-setup-in-kyma/).
+
+This tutorial is based on a sample Python Function run in a lightweight [k3d](https://k3d.io/) cluster.
+
+## Prerequisites
+
+Before you start, make sure you have these tools installed:
+
+- [Docker](https://www.docker.com/)
+- [Kyma CLI](https://github.com/kyma-project/cli)
+- [Serverless module installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/08-install-uninstall-upgrade-kyma-module/) locally or in a cluster
+
+## Steps
+
+Follow these steps:
+
+## Steps
+
+Follow these steps:
+
+1. To create local files with the default configuration for a Python Function, go to the folder in which you want to create the workspace and run the `init` Kyma CLI command:
+
+   ```bash
+   kyma init function --runtime python312 --name {FUNCTION_NAME}
+   ```
+
+   You can also use the `--dir {FULL_FOLDER_PATH}` flag to point to the directory where you want to create the Function's source files.
+
+   > [!NOTE]
+   > Python 3.12 is only one of the available runtimes. Read about all [supported runtimes and sample Functions to run on them](../technical-reference/07-10-sample-functions.md).
+
+   The `init` command creates these files in your workspace folder:
+
+   - `config.yaml` with the Function's configuration
+
+     > [!NOTE]
+     > See the detailed description of all fields available in the [`config.yaml` file](../technical-reference/07-60-function-configuration-file.md).
+
+   - `handler.py` with the Function's code and the simple "Hello World" logic
+   - `requirements.txt` with an empty file for your Function's custom dependencies
+
+   The `kyma init` command also sets **sourcePath** in the `config.yaml` file to the full path of the workspace folder:
+
+   ```yaml
+   name: my-function
+   namespace: default
+   runtime: python312
+   source:
+     sourceType: inline
+     sourcePath: {FULL_PATH_TO_WORKSPACE_FOLDER}
+   ```
+
+2. Run the `apply` Kyma CLI command to create a Function CR in the YAML format on your cluster:
+
+   ```bash
+   kyma apply function
+   ```
+
+   > [!TIP]
+   > To apply a Function from a different location, use the `--filename` flag followed by the full path to the `config.yaml` file.
+
+   Alternatively, use the `--dry-run` flag to list the file that will be created before you apply it. You can also preview the file's content in the format of your choice by adding the `--output {FILE_FORMAT}` flag, such as `--output yaml`.
+
+3. Once applied, view the Function's details in the cluster:
+
+   ```bash
+   kubectl describe function {FUNCTION_NAME}
+   ```
+
+4. Change the Function's source code in the cluster to return "Hello Serverless!":
+
+   a) Edit the Function:
+
+   ```bash
+   kubectl edit function {FUNCTION_NAME}
+   ```
+
+   b) Modify **source** as follows:
+
+   ```yaml
+   ...
+   spec:
+     runtime: python312
+     source: |-
+       def main(event, context):
+           return "Hello Serverless!"
+   ```
+
+5. Fetch the content of the resource to synchronize your local workspace sources with the cluster changes:
+
+   ```bash
+   kyma sync function {FUNCTION_NAME}
+   ```
+
+6. Check the local `handler.py` file with the Function's code to make sure that the cluster changes were fetched:
+
+   ```bash
+   cat handler.py
+   ```
+
+   This command returns the result confirming that the local sources were synchronized with cluster changes:
+
+   ```python
+   def main(event, context):
+       return "Hello Serverless!"
+   ```
diff --git a/docs/user/tutorials/01-40-debug-function.md b/docs/user/tutorials/01-40-debug-function.md
new file mode 100644
index 00000000..99067a9a
--- /dev/null
+++ b/docs/user/tutorials/01-40-debug-function.md
@@ -0,0 +1,83 @@
+# Debug a Function
+
+This tutorial shows how to use an external IDE to debug a Function run with Kyma CLI.
+
+## Steps
+
+Learn how to debug a Function with Visual Studio Code for Node.js or Python, or GoLand:
+
+
+
+#### **Visual Studio Code**
+
+1. In VSC, navigate to the location of the file with the Function definition.
+2. Create the `.vscode` directory.
+3. In the `.vscode` directory, create the `launch.json` file with the following content:
+
+   For Node.js:
+
+   ```json
+   {
+       "version": "0.2.0",
+       "configurations": [
+           {
+               "name": "attach",
+               "type": "node",
+               "request": "attach",
+               "port": 9229,
+               "address": "localhost",
+               "localRoot": "${workspaceFolder}/kubeless",
+               "remoteRoot": "/kubeless",
+               "restart": true,
+               "protocol": "inspector",
+               "timeout": 1000
+           }
+       ]
+   }
+   ```
+
+   For Python:
+
+   ```json
+   {
+       "version": "0.2.0",
+       "configurations": [
+           {
+               "name": "Python: Kyma function",
+               "type": "python",
+               "request": "attach",
+               "pathMappings": [
+                   {
+                       "localRoot": "${workspaceFolder}",
+                       "remoteRoot": "/kubeless"
+                   }
+               ],
+               "connect": {
+                   "host": "localhost",
+                   "port": 5678
+               }
+           }
+       ]
+   }
+   ```
+
+4. Run the Function with the `--debug` flag.
+
+   ```bash
+   kyma run function --debug
+   ```
+
+#### **GoLand**
+
+1. In GoLand, navigate to the location of the file with the Function definition.
+2. Choose the **Add Configuration...** option.
+3. Add a new **Attach to Node.js/Chrome** configuration with these options:
+   - Host: `localhost`
+   - Port: `9229`
+4. Run the Function with the `--debug` flag.
+
+   ```bash
+   kyma run function --debug
+   ```
+
+
\ No newline at end of file
diff --git a/docs/user/tutorials/01-50-sync-function-with-gitops.md b/docs/user/tutorials/01-50-sync-function-with-gitops.md
new file mode 100644
index 00000000..1a21e66b
--- /dev/null
+++ b/docs/user/tutorials/01-50-sync-function-with-gitops.md
@@ -0,0 +1,222 @@
+# Synchronize Git Resources with the Cluster Using a GitOps Operator
+
+This tutorial shows how you can automate the deployment of local Kyma resources in a cluster using the GitOps logic. You will use [Kyma CLI](https://github.com/kyma-project/cli) to create an inline Python Function. You will later push the resource to a GitHub repository of your choice and set up a GitOps operator to monitor the given repository folder and synchronize any changes in it with your cluster. For the purpose of this tutorial, you will install and use the [Flux](https://fluxcd.io/flux/get-started/) GitOps operator and a lightweight [k3d](https://k3d.io/) cluster.
+
+> [!TIP]
+> Although this tutorial uses Flux to synchronize Git resources with the cluster, you can use an alternative GitOps operator for this purpose, such as [Argo CD](https://argoproj.github.io/argo-cd/).
+
+## Prerequisites
+
+Before you start, make sure you have the following:
+
+- [Docker](https://www.docker.com/)
+- Git repository
+- [Homebrew](https://docs.brew.sh/Installation)
+- Kyma CLI
+- Kubeconfig file to your Kyma cluster
+
+## Steps
+
+These sections lead you through the whole installation, configuration, and synchronization process. You first install k3d and create a cluster for your custom resources (CRs). Then, you apply the necessary CustomResourceDefinition (CRD) from Kyma to be able to create Functions. Finally, you install Flux and authorize it with write access to your GitHub repository in which you store the resource files. Flux automatically synchronizes any new changes pushed to your repository with your k3d cluster.
+
+### Install and Configure a k3d Cluster
+
+1. Install k3d using Homebrew on macOS:
+
+   ```bash
+   brew install k3d
+   ```
+
+2. Create a default k3d cluster with a single server node:
+
+   ```bash
+   k3d cluster create {CLUSTER_NAME}
+   ```
+
+   This command also sets your context to the newly created cluster.
+   Run this command to display the cluster information:
+
+   ```bash
+   kubectl cluster-info
+   ```
+
+3. Apply the `functions.serverless.kyma-project.io` CRD from sources in the [`serverless`](https://github.com/kyma-project/serverless/tree/main/components/serverless/config/crd) repository. You will need it to create the Function CR in the cluster.
+
+   ```bash
+   kubectl apply -f https://raw.githubusercontent.com/kyma-project/serverless/main/components/serverless/config/crd/bases/serverless.kyma-project.io_functions.yaml
+   ```
+
+4. Run this command to make sure the CRD is applied:
+
+   ```bash
+   kubectl get customresourcedefinitions
+   ```
+
+### Prepare Your Local Workspace
+
+1. Create a workspace folder in which you will create source files for your Function:
+
+   ```bash
+   mkdir {WORKSPACE_FOLDER}
+   ```
+
+2. Use the `init` Kyma CLI command to create a local workspace with default configuration for a Python Function:
+
+   ```bash
+   kyma init function --runtime python312 --dir $PWD/{WORKSPACE_FOLDER}
+   ```
+
+   > [!TIP]
+   > Python 3.12 is only one of the available runtimes. Read about all [supported runtimes and sample Functions to run on them](../technical-reference/07-10-sample-functions.md).
+
+   This command creates the following files in your workspace folder:
+
+   - `config.yaml` with the Function's configuration
+   - `handler.py` with the Function's code and the simple "Hello World" logic
+   - `requirements.txt` with an empty file for your Function's custom dependencies
+
+### Install and Configure Flux
+
+You can now install the Flux operator, connect it with a specific Git repository folder, and authorize Flux to automatically pull changes from this repository folder and apply them on your cluster.
+
+1. Install Flux:
+
+   ```bash
+   brew install fluxctl
+   ```
+
+2. Create a `flux` namespace for the Flux operator's resources:
+
+   ```bash
+   kubectl create namespace flux
+   kubectl label namespace flux istio-injection=enabled --overwrite
+   ```
+
+3. Export details of your GitHub repository: its name, the account name, and the related email address. You must also specify the name of the folder in your GitHub repository to which you will push the Function CR built from local sources. If you don't have this folder in your repository yet, you will create it in further steps. Flux will synchronize the cluster with the content of this folder on the `main` branch.
+
+   ```bash
+   export GH_USER="{USERNAME}"
+   export GH_REPO="{REPOSITORY_NAME}"
+   export GH_EMAIL="{EMAIL_OF_YOUR_GH_ACCOUNT}"
+   export GH_FOLDER="{GIT_REPO_FOLDER_FOR_FUNCTION_RESOURCES}"
+   ```
+
+4. Run this command to apply the resources of the Flux operator to the `flux` namespace on your cluster:
+
+   ```bash
+   fluxctl install \
+   --git-user=${GH_USER} \
+   --git-email=${GH_EMAIL} \
+   --git-url=git@github.com:${GH_USER}/${GH_REPO}.git \
+   --git-path=${GH_FOLDER} \
+   --namespace=flux | kubectl apply -f -
+   ```
+
+   You will see that Flux created these resources:
+
+   ```bash
+   serviceaccount/flux created
+   clusterrole.rbac.authorization.k8s.io/flux created
+   clusterrolebinding.rbac.authorization.k8s.io/flux created
+   deployment.apps/flux created
+   secret/flux-git-deploy created
+   deployment.apps/memcached created
+   service/memcached created
+   ```
+
+5. List all Pods in the `flux` namespace to make sure that the one for Flux is in the `Running` state:
+
+   ```bash
+   kubectl get pods --namespace flux
+   ```
+
+   Expect a response similar to this one:
+
+   ```bash
+   NAME                    READY   STATUS    RESTARTS   AGE
+   flux-75758595b9-m4885   1/1     Running   0          32m
+   ```
+
+6. Obtain the certificate (SSH key) that Flux generated:
+
+   ```bash
+   fluxctl identity --k8s-fwd-ns flux
+   ```
+
+7. Run this command to copy the SSH key to the clipboard:
+
+   ```bash
+   fluxctl identity --k8s-fwd-ns flux | pbcopy
+   ```
+
+8. Go to **Settings** in your GitHub account:
+
+   ![GitHub account settings](../../assets/svls-settings.png)
+
+9. Go to the **SSH and GPG keys** section and select the **New SSH key** button:
+
+   ![Create a new SSH key](../../assets/svls-create-ssh-key.png)
+
+10. Provide the new key name, paste the previously copied SSH key, and confirm changes by selecting the **Add SSH Key** button:
+
+    ![Add a new SSH key](../../assets/svls-add-ssh-key.png)
+
+### Create a Function
+
+Now that Flux is authenticated to pull changes from your Git repository, you can start creating CRs from your local workspace files.
+
+In this section, you will create a sample inline Function.
+
+1. Back in the terminal, clone this GitHub repository to your current workspace location:
+
+   ```bash
+   git clone https://github.com/${GH_USER}/${GH_REPO}.git
+   ```
+
+   > [!NOTE]
+   > You can also clone the repository using SSH. To do that, you need to [generate a new SSH key and add it to the ssh-agent](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent).
+
+2. Go to the repository folder:
+
+   ```bash
+   cd ${GH_REPO}
+   ```
+
+3. If the folder you specified during the Flux configuration does not exist yet in the Git repository, create it:
+
+   ```bash
+   mkdir ${GH_FOLDER}
+   ```
+
+4. Run the `apply` Kyma CLI command to generate a Function CR in the YAML format and save it to the `my-function.yaml` file in the repository folder:
+
+   ```bash
+   kyma apply function --filename {FULL_PATH_TO_LOCAL_WORKSPACE_FOLDER}/config.yaml --output yaml --dry-run > ./${GH_FOLDER}/my-function.yaml
+   ```
+
+5. Push the local changes to the remote repository:
+
+   ```bash
+   git add .                        # Stage changes for the commit
+   git commit -m 'Add my-function'  # Add a commit message
+   git push origin main             # Push changes to the "main" branch of your Git repository. If your repository uses a different default branch, push to that branch instead.
+   ```
+
+6. Go to the GitHub repository to check that the changes were pushed.
+
+7. By default, Flux pulls CRs from the Git repository and pushes them to the cluster in 5-minute intervals. To enforce immediate synchronization, run this command from the terminal:
+
+   ```bash
+   fluxctl sync --k8s-fwd-ns flux
+   ```
+
+8. Make sure that the Function CR was applied by Flux to the cluster:
+
+   ```bash
+   kubectl get functions
+   ```
+
+You can see that Flux synchronized the resource and the new Function CR was added to your cluster.
+
+## Reverting Feature
+
+Once you set it up, Flux keeps monitoring the given Git repository folder for any changes. If you modify the existing resources directly in the cluster, Flux automatically reverts these changes and updates the given resource back to its version on the `main` branch of the Git repository.
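+
+For example, you can watch this behavior in action with a quick check (a sketch; `my-function` is the Function created in this tutorial):
+
+```bash
+# Change the Function directly in the cluster, for example, edit its source
+kubectl edit function my-function
+
+# Trigger an immediate synchronization instead of waiting for the next interval
+fluxctl sync --k8s-fwd-ns flux
+
+# The manual change is reverted; the Function again matches the version in Git
+kubectl get function my-function -o yaml
+```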
diff --git a/docs/user/tutorials/01-60-set-external-registry.md b/docs/user/tutorials/01-60-set-external-registry.md
new file mode 100644
index 00000000..e257e686
--- /dev/null
+++ b/docs/user/tutorials/01-60-set-external-registry.md
@@ -0,0 +1,244 @@
+# Set an External Docker Registry
+
+By default, you install Kyma with Serverless that uses the internal Docker registry running in a cluster. This tutorial shows how to override this default setup with an external Docker registry from one of these cloud providers:
+
+- [Docker Hub](https://hub.docker.com/)
+- [Google Artifact Registry (GAR)](https://cloud.google.com/artifact-registry)
+- [Azure Container Registry (ACR)](https://azure.microsoft.com/en-us/services/container-registry/)
+
+> [!WARNING]
+> Function images are not cached in Docker Hub. The reason is that this registry is not compatible with the caching logic defined in [Kaniko](https://cloud.google.com/cloud-build/docs/kaniko-cache) that Serverless uses for building images.
+
+## Prerequisites
+
+
+
+#### **Docker Hub**
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+#### **GAR**
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- [gcloud](https://cloud.google.com/sdk/gcloud/)
+- [Google Cloud Platform (GCP)](https://cloud.google.com) project
+
+#### **ACR**
+
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- [Azure CLI](https://docs.microsoft.com/en-us/cli/azure)
+- [Microsoft Azure](http://azure.com) subscription
+
+
+
+## Steps
+
+### Create Required Cloud Resources
+
+
+
+#### **Docker Hub**
+
+1. Run the `export {VARIABLE}={value}` command to set up these environment variables, where:
+
+   - **USER_NAME** is the name of the account in the Docker Hub.
+   - **PASSWORD** is the password for the account in the Docker Hub.
+   - **SERVER_ADDRESS** is the server address of the Docker Hub. At the moment, Kyma only supports the `https://index.docker.io/v1/` server address.
+   - **REGISTRY_ADDRESS** is the registry address in the Docker Hub.
+
+   > [!TIP]
+   > Usually, the Docker registry address is the same as the account name.
+
+   Example:
+
+   ```bash
+   export USER_NAME=kyma-rocks
+   export PASSWORD=admin123
+   export SERVER_ADDRESS=https://index.docker.io/v1/
+   export REGISTRY_ADDRESS=kyma-rocks
+   ```
+
+#### **GAR**
+
+To use GAR, create a Google service account that has a private key and the **Storage Admin** role permissions. Follow these steps:
+
+1. Run the `export {VARIABLE}={value}` command to set up these environment variables, where:
+
+   - **SA_NAME** is the name of the service account.
+   - **SA_DISPLAY_NAME** is the display name of the service account.
+   - **PROJECT** is the GCP project ID.
+   - **SECRET_FILE** is the path to the private key.
+   - **ROLE** is the **Storage Admin** role bound to the service account.
+   - **SERVER_ADDRESS** is the server address of the Docker registry, for example, `europe-docker.pkg.dev`.
+
+   Example:
+
+   ```bash
+   export SA_NAME=my-service-account
+   export SA_DISPLAY_NAME=service-account
+   export PROJECT=test-project-012345
+   export SECRET_FILE=my-private-key-path
+   export ROLE=roles/storage.admin
+   export SERVER_ADDRESS=europe-docker.pkg.dev
+   ```
+
+2. When you communicate with Google Cloud for the first time, set the context for your Google Cloud project. Run this command:
+
+   ```bash
+   gcloud config set project ${PROJECT}
+   ```
+
+3. Create a service account. Run:
+
+   ```bash
+   gcloud iam service-accounts create ${SA_NAME} --display-name ${SA_DISPLAY_NAME}
+   ```
+
+4. Add a policy binding for the **Storage Admin** role to the service account. Run:
+
+   ```bash
+   gcloud projects add-iam-policy-binding ${PROJECT} --member=serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role=${ROLE}
+   ```
+
+5. Create a private key for the service account:
+
+   ```bash
+   gcloud iam service-accounts keys create ${SECRET_FILE} --iam-account=${SA_NAME}@${PROJECT}.iam.gserviceaccount.com
+   ```
+
+6. Export the private key as an environment variable:
+
+   ```bash
+   export GCS_KEY_JSON=$(< "$SECRET_FILE" base64 | tr -d '\n')
+   ```
+
+#### **ACR**
+
+Create an ACR and a service principal. Follow these steps:
+
+1. Run the `export {VARIABLE}={value}` command to set up these environment variables, where:
+
+   - **AZ_REGISTRY_NAME** is the name of the ACR.
+   - **AZ_RESOURCE_GROUP** is the name of the resource group.
+   - **AZ_RESOURCE_GROUP_LOCATION** is the location of the resource group.
+   - **AZ_SUBSCRIPTION_ID** is the ID of the Azure subscription.
+   - **AZ_SERVICE_PRINCIPAL_NAME** is the name of the Azure service principal.
+   - **ROLE** is the **acrpush** role bound to the service principal.
+   - **SERVER_ADDRESS** is the server address of the Docker registry.
+
+   Example:
+
+   ```bash
+   export AZ_REGISTRY_NAME=registry
+   export AZ_RESOURCE_GROUP=my-resource-group
+   export AZ_RESOURCE_GROUP_LOCATION=westeurope
+   export AZ_SUBSCRIPTION_ID=123456-123456-123456-1234567
+   export AZ_SERVICE_PRINCIPAL_NAME=acr-service-principal
+   export ROLE=acrpush
+   export SERVER_ADDRESS=azurecr.io
+   ```
+
+2. When you communicate with Microsoft Azure for the first time, log into your Azure account. Run this command:
+
+   ```bash
+   az login
+   ```
+
+3. Create a resource group. Run:
+
+   ```bash
+   az group create --name ${AZ_RESOURCE_GROUP} --location ${AZ_RESOURCE_GROUP_LOCATION} --subscription ${AZ_SUBSCRIPTION_ID}
+   ```
+
+4. Create an ACR. Run:
+
+   ```bash
+   az acr create --name ${AZ_REGISTRY_NAME} --resource-group ${AZ_RESOURCE_GROUP} --subscription ${AZ_SUBSCRIPTION_ID} --sku {Basic, Classic, Premium, Standard}
+   ```
+
+5. Obtain the full ACR ID. Run:
+
+   ```bash
+   export AZ_REGISTRY_ID=$(az acr show --name ${AZ_REGISTRY_NAME} --query id --output tsv)
+   ```
+
+6. Create a service principal with rights scoped to the ACR. Run:
+
+   ```bash
+   export SP_PASSWORD=$(az ad sp create-for-rbac --name http://${AZ_SERVICE_PRINCIPAL_NAME} --scopes ${AZ_REGISTRY_ID} --role ${ROLE} --query password --output tsv)
+   export SP_APP_ID=$(az ad sp show --id http://${AZ_SERVICE_PRINCIPAL_NAME} --query appId --output tsv)
+   ```
+
+   Alternatively, assign the desired role to the existing service principal. Run:
+
+   ```bash
+   export SP_APP_ID=$(az ad sp show --id http://${AZ_SERVICE_PRINCIPAL_NAME} --query appId --output tsv)
+   export SP_PASSWORD=$(az ad sp show --id http://${AZ_SERVICE_PRINCIPAL_NAME} --query password --output tsv)
+   az role assignment create --assignee ${SP_APP_ID} --scope ${AZ_REGISTRY_ID} --role ${ROLE}
+   ```
+
+
+
+### Override Serverless Configuration
+
+Prepare a YAML file with overrides that match your Docker registry provider:
+
+
+
+#### **Docker Hub**
+
+```bash
+cat > docker-registry-overrides.yaml <<EOF
+serverless:
+  dockerRegistry:
+    enableInternal: false
+    username: "${USER_NAME}"
+    password: "${PASSWORD}"
+    serverAddress: "${SERVER_ADDRESS}"
+    registryAddress: "${REGISTRY_ADDRESS}"
+EOF
+```

+#### **GAR**
+
+```bash
+cat > docker-registry-overrides.yaml <<EOF
+serverless:
+  dockerRegistry:
+    enableInternal: false
+    username: "_json_key_base64"
+    password: "${GCS_KEY_JSON}"
+    serverAddress: "${SERVER_ADDRESS}"
+    registryAddress: "${SERVER_ADDRESS}/${PROJECT}"
+EOF
+```

+#### **ACR**
+
+```bash
+cat > docker-registry-overrides.yaml <<EOF
+serverless:
+  dockerRegistry:
+    enableInternal: false
+    username: "${SP_APP_ID}"
+    password: "${SP_PASSWORD}"
+    serverAddress: "${AZ_REGISTRY_NAME}.${SERVER_ADDRESS}"
+    registryAddress: "${AZ_REGISTRY_NAME}.${SERVER_ADDRESS}"
+EOF
+```

+
+
+> [!WARNING]
+> If you want to set an external Docker registry before you install Kyma, manually apply the Secret to the cluster before you run the installation script.
+
+### Apply Configuration
+
+Deploy Kyma with the changed Docker registry configuration. Run:
+
+```bash
+kyma deploy --values-file docker-registry-overrides.yaml
+```
+
+> [!NOTE]
+> To learn more, read about [changing Kyma configuration](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/03-change-kyma-config-values).
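+
+To troubleshoot authentication issues, you can verify that the exported credentials are accepted by your registry. This quick check uses the Docker CLI with the Docker Hub variables from this tutorial; for GAR or ACR, substitute the respective user name and password:
+
+```bash
+# A successful login confirms the credentials that Serverless will use to push Function images
+echo "${PASSWORD}" | docker login "${SERVER_ADDRESS}" --username "${USER_NAME}" --password-stdin
+```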
diff --git a/docs/user/tutorials/01-80-log-into-private-packages-registry.md b/docs/user/tutorials/01-80-log-into-private-packages-registry.md
new file mode 100644
index 00000000..6e3b7c84
--- /dev/null
+++ b/docs/user/tutorials/01-80-log-into-private-packages-registry.md
@@ -0,0 +1,118 @@
+# Log Into a Private Package Registry Using Credentials from a Secret
+
+Serverless allows you to consume private packages in your Functions. This tutorial shows how you can log into a private package registry by defining credentials in a Secret custom resource (CR).
+
+## Steps
+
+### Create a Secret
+
+Create a Secret CR for your Node.js or Python Functions. You can also create one combined Secret CR for both runtimes.
+
+
+
+#### **Node.js**
+
+1. Export these variables:
+
+   ```bash
+   export REGISTRY={ADDRESS_TO_REGISTRY}
+   export TOKEN={TOKEN_TO_REGISTRY}
+   export NAMESPACE={FUNCTION_NAMESPACE}
+   ```
+
+2. Create a Secret:
+
+   ```bash
+   cat <<EOF | kubectl apply -f -
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: my-registry-config
+     namespace: ${NAMESPACE}
+     labels:
+       serverless.kyma-project.io/config: credentials
+   type: Opaque
+   stringData:
+     .npmrc: |
+       registry=https://${REGISTRY}
+       //${REGISTRY}:_authToken=${TOKEN}
+   EOF
+   ```
+
+
+
+### Test the Package Registry Switch
+
+[Create a Function](01-10-create-inline-function.md) with dependencies from the external registry. Check if your Function was created and all conditions are set to `True`:
+
+```bash
+kubectl get functions -n $NAMESPACE
+```
+
+You should get a result similar to this example:
+
+```bash
+NAME            CONFIGURED   BUILT   RUNNING   RUNTIME    VERSION   AGE
+test-function   True         True    True      nodejs20   1         96s
+```
+
+> [!WARNING]
+> If you want to create a cluster-wide Secret, you must create it in the `kyma-system` namespace and add the `serverless.kyma-project.io/config: credentials` label.
diff --git a/docs/user/tutorials/01-90-set-asynchronous-connection.md b/docs/user/tutorials/01-90-set-asynchronous-connection.md
new file mode 100644
index 00000000..840d2712
--- /dev/null
+++ b/docs/user/tutorials/01-90-set-asynchronous-connection.md
@@ -0,0 +1,146 @@
+# Set Asynchronous Communication Between Functions
+
+This tutorial demonstrates how to connect two Functions asynchronously. It is based on the [in-cluster Eventing example](https://github.com/kyma-project/serverless/tree/main/examples/incluster_eventing).
+
+The example provides a very simple scenario of asynchronous communication between two Functions. The first Function accepts the incoming traffic via HTTP, sanitizes the payload, and publishes the content as an in-cluster event using [Kyma Eventing](https://kyma-project.io/docs/kyma/latest/01-overview/eventing/).
+The second Function is a message receiver. It subscribes to the given event type and stores the payload.
+
+This tutorial shows only one possible use case. There are many more ways to orchestrate your application logic into specialized Functions and benefit from decoupled, reusable components and an event-driven architecture.
+
+## Prerequisites
+
+- [Kyma CLI](https://github.com/kyma-project/cli)
+- [Eventing and Istio components installed](https://kyma-project.io/docs/kyma/latest/04-operation-guides/operations/02-install-kyma/#install-specific-components)
+
+## Steps
+
+1. Export the `KUBECONFIG` variable:
+
+   ```bash
+   export KUBECONFIG={KUBECONFIG_PATH}
+   ```
+
+2. Create the `emitter` and `receiver` folders in your project.
+
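+Before you create the Functions, you can optionally confirm that the Subscription API of Kyma Eventing is available in the cluster. This check assumes the default CRD name `subscriptions.eventing.kyma-project.io`:
+
+```bash
+# The command fails if the Eventing module is not installed
+kubectl get crd subscriptions.eventing.kyma-project.io
+```
+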
+### Create the Emitter Function
+
+1. Go to the `emitter` folder and run the Kyma CLI `init` command to initialize the scaffold for your first Function:
+
+   ```bash
+   kyma init function
+   ```
+
+   The `init` command creates these files in your workspace folder:
+
+   - `config.yaml` with the Function's configuration
+
+     > [!NOTE]
+     > See the detailed description of all fields available in the [`config.yaml` file](../technical-reference/07-60-function-configuration-file.md).
+
+   - `handler.js` with the Function's code and the simple "Hello Serverless" logic
+
+   - `package.json` with the Function's dependencies
+
+2. In the `config.yaml` file, configure an APIRule to expose your Function to the incoming traffic over HTTP. Provide the subdomain name in the `host` property:
+
+   ```yaml
+   apiRules:
+     - name: incoming-http-trigger
+       service:
+         host: incoming
+       rules:
+         - methods:
+             - GET
+           accessStrategies:
+             - handler: allow
+   ```
+
+3. Provide your Function logic in the `handler.js` file:
+
+   > [!NOTE]
+   > In this example, there's no sanitization logic. The `sanitise` function is just a placeholder.
+
+   ```js
+   const axios = require('axios') // Not used directly, but required for the auto-instrumentation of outgoing requests; see the note below.
+
+   module.exports = {
+       main: async function (event, context) {
+           let sanitisedData = sanitise(event.data)
+
+           const eventType = "sap.kyma.custom.acme.payload.sanitised.v1";
+           const eventSource = "kyma";
+
+           return await event.emitCloudEvent(eventType, eventSource, sanitisedData)
+               .then(resp => {
+                   return "Event sent";
+               }).catch(err=> {
+                   console.error(err)
+                   return err;
+               });
+       }
+   }
+   let sanitise = (data)=>{
+       console.log(`sanitising data...`)
+       console.log(data)
+       return data
+   }
+   ```
+
+   The `sap.kyma.custom.acme.payload.sanitised.v1` is a sample event type that the emitter Function declares when publishing events. You can choose a different one that better suits your use case. Keep in mind the constraints described on the [Event names](https://kyma-project.io/docs/kyma/latest/05-technical-reference/evnt-01-event-names/) page. The receiver subscribes to the event type to consume the events.
+
+   The event object provides convenience functions to build and publish events. To send the event, build the CloudEvent. To learn more, read [Function's specification](../technical-reference/07-70-function-specification.md#event-object-sdk). In addition, your **eventOut.source** key must point to `"kyma"` to use Kyma in-cluster Eventing.
+   There is a `require('axios')` line even though the Function code is not using it directly. This is needed for the auto-instrumentation to properly handle the outgoing requests sent by the `emitCloudEvent` method (which uses the `axios` library under the hood). Without the `axios` import, the Function still works, but the published events are not reflected in the trace backend.
+
+4. Apply your emitter Function:
+
+   ```bash
+   kyma apply function
+   ```
+
+   Your Function is now built and deployed in Kyma runtime. Kyma exposes it through the APIRule. The incoming payloads are processed by your emitter Function. It then sends the sanitized content to the workload that subscribes to the selected event type. In our case, it's the receiver Function.
+
+5. Test the first Function. Send the payload and see if your HTTP traffic is accepted:
+
+   ```bash
+   export KYMA_DOMAIN={KYMA_DOMAIN_VARIABLE}
+
+   curl -X POST https://incoming.${KYMA_DOMAIN} -H 'Content-Type: application/json' -d '{"foo":"bar"}'
+   ```
+
+### Create the Receiver Function
+
+1. 
Go to your `receiver` folder and run Kyma CLI `init` command to initialize the scaffold for your second Function: + + ```bash + kyma init function + ``` + + The `init` command creates the same files as in the `emitter` folder. + +2. In the `config.yaml` file, configure event types your Function will subscribe to: + + ```yaml + name: event-receiver + namespace: default + runtime: nodejs20 + source: + sourceType: inline + subscriptions: + - name: event-receiver + typeMatching: exact + source: "" + types: + - sap.kyma.custom.acme.payload.sanitised.v1 + schemaVersion: v1 + ``` + +3. Apply your receiver Function: + + ```bash + kyma apply function + ``` + + The Function is configured, built, and deployed in Kyma runtime. The Subscription becomes active and all events with the selected type are processed by the Function. + +### Test the Whole Setup + +Send a payload to the first Function. For example, use the POST request mentioned above. As the Functions are joined by the in-cluster Eventing, the payload is processed in sequence by both of your Functions. +In the Function's logs, you can see that both sanitization logic (using the first Function) and the storing logic (using the second Function) are executed. diff --git a/docs/user/tutorials/README.md b/docs/user/tutorials/README.md new file mode 100644 index 00000000..9edc9149 --- /dev/null +++ b/docs/user/tutorials/README.md @@ -0,0 +1,3 @@ +# Tutorials + +This section will help you understand how the Serverless Function works and how to use it in different scenarios. You can also learn how to set and switch a Docker registry. diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..5d6848ec --- /dev/null +++ b/go.mod @@ -0,0 +1,164 @@ +module github.com/kyma-project/docker-registry + +go 1.21 + +toolchain go1.21.3 + +require ( + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 + github.com/pkg/errors v0.9.1 + github.com/stretchr/testify v1.9.0 + github.com/vrischmann/envconfig v1.3.0 + go.uber.org/zap v1.27.0 + gopkg.in/yaml.v3 v3.0.1 + helm.sh/helm/v3 v3.14.3 + k8s.io/api v0.29.4 + k8s.io/apiextensions-apiserver v0.29.4 + k8s.io/apimachinery v0.29.4 + k8s.io/client-go v0.29.4 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.17.3 +) + +require ( + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/go-connections v0.4.0 // 
indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + 
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.17.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/apiserver v0.29.4 // indirect + k8s.io/cli-runtime v0.29.4 // indirect + k8s.io/component-base v0.29.4 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.0 // indirect + oras.land/oras-go v1.2.4 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..a6ad8721 --- /dev/null +++ b/go.sum @@ -0,0 +1,602 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b 
h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= 
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod 
h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= +github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= +github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux 
v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod 
h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 
v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/vrischmann/envconfig v1.3.0 
h1:4XIvQTXznxmWMnjouj0ST5lFo/WAYf5Exgl3x82crEk= +github.com/vrischmann/envconfig v1.3.0/go.mod h1:bbvxFYJdRSpXrhS63mBFtKJzkDiNkyArOLXtY6q0kuI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod 
h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.14.3 h1:HmvRJlwyyt9HjgmAuxHbHv3PhMz9ir/XNWHyXfmnOP4= +helm.sh/helm/v3 v3.14.3/go.mod h1:v6myVbyseSBJTzhmeE39UcPLNv6cQK6qss3dvgAySaE= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= +k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= +k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= +k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= +k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= +k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= +k8s.io/cli-runtime v0.29.4 h1:QvUrddBxVX6XFJ6z64cGpEk7e4bQduKweqbqq+qBd9g= +k8s.io/cli-runtime v0.29.4/go.mod h1:NmklYuZ4DLfOr2XEIT8Nzl883KMZUCv7KMj3wMHayCA= +k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= +k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= +k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= +k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= +oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= +sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= +sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= +sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod 
h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/Makefile b/hack/Makefile new file mode 100644 index 00000000..078eba60 --- /dev/null +++ b/hack/Makefile @@ -0,0 +1,7 @@ +# This Makefile is used to add context to the `gardener.mk` file by adding the PROJECT_ROOT variable. +# This is needed to use targets in the gardener.mk file from a prompt (e.g. `make -C hack provision-gardener`) + +PROJECT_ROOT=.. + +include ${PROJECT_ROOT}/hack/help.mk +include ${PROJECT_ROOT}/hack/gardener.mk diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100755 index 00000000..29c55ecd --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/hack/gardener.mk b/hack/gardener.mk new file mode 100644 index 00000000..54522978 --- /dev/null +++ b/hack/gardener.mk @@ -0,0 +1,27 @@ +ifndef PROJECT_ROOT +$(error PROJECT_ROOT is undefined) +endif +include ${PROJECT_ROOT}/hack/tools.mk + +##@ Gardener + +GARDENER_INFRASTRUCTURE = az +HIBERNATION_HOUR=$(shell echo $$(( ( $(shell date +%H | sed s/^0//g) + 5 ) % 24 ))) +GIT_COMMIT_SHA=$(shell git rev-parse --short=8 HEAD) +ifneq (,$(GARDENER_SA_PATH)) +GARDENER_K8S_VERSION=$(shell kubectl --kubeconfig=${GARDENER_SA_PATH} get cloudprofiles.core.gardener.cloud ${GARDENER_INFRASTRUCTURE} -o=jsonpath='{.spec.kubernetes.versions[0].version}') +else +GARDENER_K8S_VERSION=1.27.4 +endif +# Overwrite the default Kyma CLI Garden Linux version because the default one is not supported. +GARDENER_LINUX_VERSION=1312.3.0 + +.PHONY: provision-gardener +provision-gardener: kyma ## Provision a Gardener cluster with the latest k8s version + ${KYMA} provision gardener ${GARDENER_INFRASTRUCTURE} -c ${GARDENER_SA_PATH} -n test-${GIT_COMMIT_SHA} -p ${GARDENER_PROJECT} -s ${GARDENER_SECRET_NAME} -k ${GARDENER_K8S_VERSION}\ + --gardenlinux-version=$(GARDENER_LINUX_VERSION) --hibernation-start="00 ${HIBERNATION_HOUR} * * ?"
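Taken together, `hack/Makefile` and `gardener.mk` let CI (or a developer with Gardener credentials) provision a short-lived test cluster from the repository root. A minimal usage sketch follows; the exported values are placeholders, not values from this repository:

```bash
# Placeholders: point these at your own Gardener service account and project.
export GARDENER_SA_PATH="$HOME/gardener/service-account.kubeconfig"
export GARDENER_PROJECT="my-project"
export GARDENER_SECRET_NAME="my-azure-secret"

# Provisions a shoot named test-<short commit SHA>; the HIBERNATION_HOUR
# arithmetic above schedules the cluster to hibernate about five hours later.
make -C hack provision-gardener

# ...run tests against the cluster...

# Tear the shoot down again with the deprovision target defined just below.
make -C hack deprovision-gardener
```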
+ +.PHONY: deprovision-gardener +deprovision-gardener: kyma ## Deprovision the Gardener cluster + kubectl --kubeconfig=${GARDENER_SA_PATH} annotate shoot test-${GIT_COMMIT_SHA} confirmation.gardener.cloud/deletion=true + kubectl --kubeconfig=${GARDENER_SA_PATH} delete shoot test-${GIT_COMMIT_SHA} --wait=false diff --git a/hack/get_kyma_file_name.sh b/hack/get_kyma_file_name.sh new file mode 100755 index 00000000..f6d792c8 --- /dev/null +++ b/hack/get_kyma_file_name.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +function get_kyma_file_name () { + + local _OS_TYPE=$1 + local _OS_ARCH=$2 + + [ "$_OS_TYPE" == "Linux" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma-linux" || + [ "$_OS_TYPE" == "Linux" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-linux-arm" || + [ "$_OS_TYPE" == "Windows" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma.exe" || + [ "$_OS_TYPE" == "Windows" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-arm.exe" || + [ "$_OS_TYPE" == "Darwin" ] && [ "$_OS_ARCH" == "x86_64" ] && echo "kyma-darwin" || + [ "$_OS_TYPE" == "Darwin" ] && [ "$_OS_ARCH" == "arm64" ] && echo "kyma-darwin-arm" +} + +get_kyma_file_name "$@" diff --git a/hack/help.mk b/hack/help.mk new file mode 100644 index 00000000..a7a9d8f5 --- /dev/null +++ b/hack/help.mk @@ -0,0 +1,5 @@ +##@ General +.DEFAULT_GOAL=help +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/hack/k3d.mk b/hack/k3d.mk new file mode 100644 index 00000000..7eab3ad0 --- /dev/null +++ b/hack/k3d.mk @@ -0,0 +1,26 @@ +CLUSTER_NAME ?= kyma +REGISTRY_PORT ?= 5001 +REGISTRY_NAME ?= ${CLUSTER_NAME}-registry + +ifndef PROJECT_ROOT +$(error PROJECT_ROOT is undefined) +endif +include $(PROJECT_ROOT)/hack/tools.mk + +##@ K3D + +.PHONY: create-k3d +create-k3d: kyma ## Create a k3d cluster and the kyma-system namespace. + ${KYMA} provision k3d --registry-port ${REGISTRY_PORT} --name ${CLUSTER_NAME} --ci -p 6080:8080@loadbalancer -p 6433:8433@loadbalancer + kubectl create namespace kyma-system + +.PHONY: delete-k3d +delete-k3d: delete-k3d-cluster delete-k3d-registry ## Delete k3d registry & cluster. + +.PHONY: delete-k3d-registry +delete-k3d-registry: ## Delete k3d kyma registry. + -k3d registry delete ${REGISTRY_NAME} + +.PHONY: delete-k3d-cluster +delete-k3d-cluster: ## Delete k3d kyma cluster. + -k3d cluster delete ${CLUSTER_NAME} diff --git a/hack/makefile-strategy.md b/hack/makefile-strategy.md new file mode 100644 index 00000000..4560b4d0 --- /dev/null +++ b/hack/makefile-strategy.md @@ -0,0 +1,40 @@ +# Makefile Architecture + +The goal is to develop a way to extend Makefile targets in the most readable way, without keeping all targets in one file. + +Pros of the architecture: + +* targets are well organized +* single responsibility +* extensibility + +## Dependencies Description +* `Makefile` - The main makefile that allows for installing and running the Serverless Operator. It's a high-level target to run the module without knowing its internals. It's the first contact point for the Serverless module users. +* `hack/Makefile` - High-level API that contains all targets that may be used by any CI/CD system. It has dependencies on the `hack/*.mk` makefiles. +* `hack/*.mk` - Contains common targets that may be used by other makefiles (they are included and shouldn't be run directly). Targets are grouped by functionality. They should contain helper targets.
+`components/operator/Makefile` - Contains all basic operations on the Serverless Operator, like builds, tests, etc., used during development. It's also used by `Makefile`. +`components/serverless/Makefile` - Contains all basic operations on Serverless, like builds, tests, etc., used during development. + +## Good Practices + +Every makefile (`Makefile` and `*.mk`) must contain a few pieces, making the file more useful and human-readable: + +* include `hack/help.mk` - this file provides the `help` target, which describes what is inside the makefile and what you can do with it. +* before the `include`, you must define the `PROJECT_ROOT` variable, pointing to the project root directory. + +Additionally, `Makefile` (but not `*.mk`) can also contain the following: + +* Description - helps users understand what the target does; it is shown in the help (`## description` after the target name). +* Sections - allow for grouping targets by purpose (`##@`). + +Example of a target that includes all good practices: + +```Makefile +PROJECT_ROOT=. +include ${PROJECT_ROOT}/hack/help.mk + +##@ General + +.PHONY: run +run: create-k3d install-serverless-main ## Create k3d cluster and install serverless from main +``` \ No newline at end of file diff --git a/hack/tools.mk b/hack/tools.mk new file mode 100644 index 00000000..751d843f --- /dev/null +++ b/hack/tools.mk @@ -0,0 +1,77 @@ +## Location to install dependencies to +ifndef PROJECT_ROOT +$(error PROJECT_ROOT is undefined) +endif +LOCALBIN ?= $(realpath $(PROJECT_ROOT))/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +# Operating system architecture +OS_ARCH=$(shell uname -m) +# Operating system type +OS_TYPE=$(shell uname) + +##@ Tools + +########## Kyma CLI ########### +KYMA_STABILITY ?= unstable + +define os_error +$(error Error: unsupported platform OS_TYPE:$1, OS_ARCH:$2; to mitigate this problem, set the KYMA variable to the absolute path of a kyma-cli binary compatible with your operating system and architecture) +endef + +KYMA ?= $(LOCALBIN)/kyma-$(KYMA_STABILITY) +kyma: $(LOCALBIN) $(KYMA) ## Download kyma locally if necessary. +$(KYMA): + $(eval KYMA_FILE_NAME=$(shell ${PROJECT_ROOT}/hack/get_kyma_file_name.sh ${OS_TYPE} ${OS_ARCH})) + ## Fail if the operating system and architecture combination is not supported + $(if $(KYMA_FILE_NAME),,$(call os_error, ${OS_TYPE}, ${OS_ARCH})) + test -f $@ || curl -s -Lo $(KYMA) https://storage.googleapis.com/kyma-cli-$(KYMA_STABILITY)/$(KYMA_FILE_NAME) + chmod +x $(KYMA) + +########## Kustomize ########### +KUSTOMIZE ?= $(LOCALBIN)/kustomize +KUSTOMIZE_VERSION ?= v4.5.5 +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } + +########## Controller-Gen ########### +CONTROLLER_TOOLS_VERSION ?= v0.14.0 +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
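+# The recipe below compares the version reported by an already-installed controller-gen +# with the pinned CONTROLLER_TOOLS_VERSION and reinstalls the binary on mismatch (or +# when it is missing), so bumping the pinned version refreshes the local tool.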
+$(CONTROLLER_GEN): $(LOCALBIN) + test "$$(${LOCALBIN}/controller-gen --version 2>/dev/null)" = "Version: ${CONTROLLER_TOOLS_VERSION}" || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +########## Envtest ########### +ENVTEST ?= $(LOCALBIN)/setup-envtest +KUBEBUILDER_ASSETS=$(LOCALBIN)/k8s/kubebuilder_assets + +define path_error +$(error Error: path is empty: $1) +endef + +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by the envtest binary. +ENVTEST_K8S_VERSION = 1.27.1 + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + +# Envtest downloads binaries to the k8s/(K8S_VERSION)-(arch)-(os) directory, which is different on every machine. +# To use the same `envtest` binaries on CI and during local development, this target moves them to a directory known up front. +# Additionally, `OS_ARCH` returns x86_64, but envtest uses the `amd64` name. +.PHONY: kubebuilder-assets +kubebuilder-assets: envtest + $(eval DOWNLOADED_ASSETS=$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)) + $(if $(DOWNLOADED_ASSETS),,$(call path_error, ${DOWNLOADED_ASSETS})) + chmod -R 755 $(DOWNLOADED_ASSETS) + mkdir -p $(LOCALBIN)/k8s/kubebuilder_assets/ + mv $(DOWNLOADED_ASSETS)/* $(LOCALBIN)/k8s/kubebuilder_assets/ + rm -d $(DOWNLOADED_ASSETS) diff --git a/markdown_heading_capitalization.js b/markdown_heading_capitalization.js new file mode 100644 index 00000000..7077d5c1 --- /dev/null +++ b/markdown_heading_capitalization.js @@ -0,0 +1,31 @@ +// This file is used to trigger the custom rule that checks if all words longer than 4 characters in markdown headings are capitalized. To run this check, you must include the rule in the markdownlint command. +// For example, if you want to run the check on the `docs` folder, run the following command: `markdownlint -r ./markdown_heading_capitalization.js docs/`.
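+// The rule below walks the parsed heading_open tokens, splits each raw heading line on spaces, and reports every word longer than 4 characters that does not start with an uppercase letter.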
+module.exports = [{ + "names": [ "custom/capitalize-headings" ], + "description": "Heading words longer than 4 characters should be capitalized", + "tags": [ "formatting" ], + "function": function rule(params, onError) { + params.tokens.filter(function filterToken(token) { + return token.type === "heading_open"; + }).forEach(function forToken(heading) { + var headingTokenContent = heading.line.trim(); + var wordsInHeading = headingTokenContent.split(' '); + + for (var i = 0; i < wordsInHeading.length; i++) { + if (wordsInHeading[i].length > 4 && wordsInHeading[i] && + wordsInHeading[i].charAt(0) !== wordsInHeading[i].charAt(0).toUpperCase()) { + var capitalizedWord = wordsInHeading[i].charAt(0).toUpperCase() + wordsInHeading[i].slice(1); + var detailMessage = "Change " + "'" + wordsInHeading[i] + "'" + " to " + "'" + capitalizedWord + "'"; + + onError({ + "lineNumber": heading.lineNumber, + "detail": detailMessage, + "context": headingTokenContent, // Show the whole heading as context + "range": [headingTokenContent.indexOf(wordsInHeading[i]), wordsInHeading[i].length] // Underline the word which needs a change + }); + } + } + }); + } + }]; + \ No newline at end of file diff --git a/module-config-template.yaml b/module-config-template.yaml new file mode 100644 index 00000000..96e034e0 --- /dev/null +++ b/module-config-template.yaml @@ -0,0 +1,8 @@ +name: {{.Name}} +channel: {{.Channel}} +version: {{.Version}} +defaultCR: config/samples/default-dockerregistry-cr.yaml +manifest: dockerregistry-operator.yaml +annotations: + "operator.kyma-project.io/doc-url": "https://kyma-project.io/#/docker-registry/user/README" +moduleRepo: https://github.com/kyma-project/docker-registry.git diff --git a/sec-scanners-config.yaml b/sec-scanners-config.yaml new file mode 100644 index 00000000..13036cc1 --- /dev/null +++ b/sec-scanners-config.yaml @@ -0,0 +1,11 @@ +module-name: docker-registry +rc-tag: 1.4.0 +protecode: + - europe-docker.pkg.dev/kyma-project/prod/dockerregistry-operator:main + - europe-docker.pkg.dev/kyma-project/prod/tpi/registry:2.8.1-1ae4c190 +whitesource: + language: golang-mod + subprojects: false + exclude: + - "**/test/**" + - "**/*_test.go"
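Finally, a minimal sketch of exercising the custom markdownlint heading rule defined earlier (the sample file, directory, and exact report text are illustrative assumptions; only the markdownlint invocation comes from the rule's own header comment):

```bash
# Create a doc with a heading that violates the rule: 'deploying' and
# 'operator' are longer than 4 characters and not capitalized.
mkdir -p docs
printf '## deploying the operator\n' > docs/sample.md

# Run markdownlint (markdownlint-cli) with the custom rule; it should report
# something like:
#   docs/sample.md:1 custom/capitalize-headings ... [Change 'deploying' to 'Deploying']
markdownlint -r ./markdown_heading_capitalization.js docs/
```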