From 9eddaafb6ebcb52f84fbfa02216c612db68c44ca Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Thu, 18 Apr 2024 08:56:46 -0500 Subject: [PATCH 01/23] chore: add basic structure for k6-operator docs --- .../v0.50.x/set-up/set-up-distributed-k6/_index.md | 10 ++++++++++ .../set-up-distributed-k6/install-k6-operator.md | 10 ++++++++++ .../set-up/set-up-distributed-k6/troubleshooting.md | 10 ++++++++++ .../set-up-distributed-k6/upgrade-k6-operator.md | 10 ++++++++++ .../set-up/set-up-distributed-k6/usage/_index.md | 10 ++++++++++ .../usage/configure-scripts-in-testrun-crd.md | 10 ++++++++++ .../set-up/set-up-distributed-k6/usage/extensions.md | 10 ++++++++++ .../set-up/set-up-distributed-k6/usage/reference.md | 10 ++++++++++ 8 files changed, 80 insertions(+) create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md new file mode 100644 index 0000000000..82419e6d32 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md @@ -0,0 +1,10 @@ +--- +weight: 150 +title: Set up distributed k6 +--- + +# Set up distributed k6 + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md 
b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md new file mode 100644 index 0000000000..ff5ff606e1 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -0,0 +1,10 @@ +--- +weight: 100 +title: Install k6-operator +--- + +# Install k6-operator + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md new file mode 100644 index 0000000000..49c0b6169d --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -0,0 +1,10 @@ +--- +weight: 400 +title: Troubleshooting +--- + +# Troubleshooting + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md new file mode 100644 index 0000000000..5d93f6777c --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -0,0 +1,10 @@ +--- +weight: 200 +title: Upgrade k6-operator +--- + +# Upgrade k6-operator + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md new file mode 100644 index 0000000000..d4e5e36a30 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md @@ -0,0 +1,10 @@ +--- +weight: 300 +title: Usage +--- + +# Usage + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md new file mode 100644 index 0000000000..33141304b2 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md @@ -0,0 +1,10 @@ +--- +weight: 100 +title: Configure scripts in TestRun CRD +--- 
+ +# Configure scripts in TestRun CRD + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md new file mode 100644 index 0000000000..fa3eb91c6a --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -0,0 +1,10 @@ +--- +weight: 200 +title: Extensions +--- + +# Extensions + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md new file mode 100644 index 0000000000..37b139497a --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md @@ -0,0 +1,10 @@ +--- +weight: 300 +title: Reference +--- + +# Reference + + + +{{< section depth=2 >}} From 4ea406dedc033d8fa9d634067cc419d3ba5f0255 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Thu, 18 Apr 2024 08:59:04 -0500 Subject: [PATCH 02/23] chore: hide reference for now --- .../v0.50.x/set-up/set-up-distributed-k6/usage/reference.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md index 37b139497a..5ac5f89b85 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md @@ -1,6 +1,8 @@ --- weight: 300 title: Reference +_build: + list: false --- # Reference From 2f1015bca9a22fb4db556c1a5b0d0719425dd83b Mon Sep 17 00:00:00 2001 From: Olha Yevtushenko Date: Thu, 16 May 2024 15:13:35 +0300 Subject: [PATCH 03/23] k6-operator: add main sections from the repo Readme.md --- .../install-k6-operator.md | 83 ++++++- .../usage/common-options.md | 58 +++++ .../usage/configure-scripts-in-testrun-crd.md | 10 - .../executing-k6-scripts-with-testrun-crd.md | 221 
++++++++++++++++++ .../set-up-distributed-k6/usage/extensions.md | 104 ++++++++- .../set-up-distributed-k6/usage/reference.md | 4 +- .../usage/scheduling-tests.md | 108 +++++++++ 7 files changed, 574 insertions(+), 14 deletions(-) create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index ff5ff606e1..440f3af12a 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -5,6 +5,87 @@ title: Install k6-operator # Install k6-operator - +## Prerequisites + +The minimal prerequisite for k6-operator is a Kubernetes cluster and access to it with [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +## Deploying the operator + +### Bundle deployment + +The easiest way to install the operator is with bundle: +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - +``` + +Bundle includes default manifests for k6-operator, including `k6-operator-system` namespace and k6-operator Deployment with latest tagged Docker image. Customizations can be made on top of this manifest as needs be, e.g. with `kustomize`. 
+ +### Deployment with Helm + +Helm releases of k6-operator are published together with other Grafana Helm charts and can be installed with the following commands: + +```bash +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +helm install k6-operator grafana/k6-operator +``` + +Passing additional configuration can be done with `values.yaml` (example can be found [here](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml)): + +```bash +helm install k6-operator grafana/k6-operator -f values.yaml +``` + +Complete list of options available for Helm can be found [here](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). + +### Makefile deployment + +In order to install the operator with Makefile, the following additional tooling must be installed: +- [go](https://go.dev/doc/install) +- [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) + +A more manual, low-level way to install the operator is by running the command below: + +```bash +make deploy +``` + +This method may be more useful for development of k6-operator, depending on specifics of the setup. + +## Installing the CRD + +The k6-operator includes custom resources called `TestRun`, `PrivateLoadZone` and currently also `K6`. These will be automatically installed when you do a deployment or install a bundle, but in case you want to do it yourself, you may run the command below: + +```bash +make install +``` + +{{% admonition type="warning" %}} + +`K6` CRD has been substituted with `TestRun` CRD and will be deprecated in the future. Please use `TestRun` CRD. + +{{% /admonition %}} + +## Namespaced deployment + +By default, k6-operator watches `TestRun` and `PriaveLoadZone` custom resources in all namespaces. 
But it is possible to configure k6-operator to watch only a specific namespace by setting a `WATCH_NAMESPACE` environment variable for the operator's deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-operator-controller-manager + namespace: k6-operator-system +spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/grafana/k6-operator:controller-v0.0.14 + env: + - name: WATCH_NAMESPACE + value: "some-ns" +# ... +``` {{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md new file mode 100644 index 0000000000..c03f48eba1 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md @@ -0,0 +1,58 @@ +--- +weight: 300 +title: Common options +--- + + + +# Common options + +The only options that must be defined as part of `TestRun` CRD spec are `script` and `parallelism`. But there are many others; here are some of the most common. + +## Parallelism + +`parallelism` defines how many instances of k6 runneres you want to create. Each instance will be assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6-operator will +create four k6 jobs, each running 50 VUs to achieve the desired VU count. + +## Separate + +`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a +test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. 
+ +## Service account + +If you want to use a custom Service Account you'll need to pass it into both the starter and runner object: + +```yaml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: +spec: + script: + configMap: + name: "" + runner: + serviceAccountName: + starter: + serviceAccountName: +``` + +## Runner + +Defines options for the test runner pods. The non-exhaustive list includes: + +* passing resource limits and requests +* passing in labels and annotations +* passing in affinity and anti-affinity +* passing in a custom image + +## Starter + +Defines options for the starter pod. The non-exhaustive list includes: + +* passing in custom image +* passing in labels and annotations + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md deleted file mode 100644 index 33141304b2..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/configure-scripts-in-testrun-crd.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -weight: 100 -title: Configure scripts in TestRun CRD ---- - -# Configure scripts in TestRun CRD - - - -{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md new file mode 100644 index 0000000000..d7304cf1d9 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -0,0 +1,221 @@ +--- +weight: 100 +title: Executing k6 scripts with TestRun CRD +--- + +# Executing k6 scripts with TestRun CRD + +## Defining test scripts + +There are several ways to configure scripts in `TestRun` CRD. +#The operator utilises `ConfigMap`s and `LocalFile` to serve test scripts to the jobs. 
To upload your own test script, run the following command to configure through `ConfigMap`: + +### ConfigMap + +The main way to configure script is to create a ConfigMap with the script contents: + +```bash +kubectl create configmap my-test --from-file /path/to/my/test.js +``` + +Then specify it in `TestRun`: + +```bash + script: + configMap: + name: my-test + file: test.js +``` + +{{% admonition type="note" %}} + +There is a character limit of 1048576 bytes to a single configmap. If you need to have a larger test file, you'll need to use a volumeClaim or a localFile instead + +{{% /admonition %}} + +### VolumeClaim + +If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting file(s), you can pass it to the test like this: + +```yaml +spec: + script: + volumeClaim: + name: "stress-test-volumeClaim" + # test.js should exist inside /test/ folder. + # All the js files and directories test.js is importing + # should be inside the same directory as well. + file: "test.js" +``` + +The pods will expect to find script files in `/test/` folder. If `volumeClaim` fails, it's the first place to check: the latest initializer pod does not generate any logs and when it can't find the file, it will terminate with error. So missing file may not be that obvious and it makes sense to check it is present manually. See [GH issue](https://github.com/k6-operator/issues/143) for potential improvements. 
+ +#### Sample directory structure + +``` +├── test +│ ├── requests +│ │ ├── stress-test.js +│ ├── test.js +``` + +In the above example, `test.js` imports a function from `stress-test.js` and these files would look like this: + +```js +// test.js +import stressTest from "./requests/stress-test.js"; + +export const options = { + vus: 50, + duration: '10s' +}; + +export default function () { + stressTest(); +} +``` + +```js +// stress-test.js +import { sleep, check } from 'k6'; +import http from 'k6/http'; + + +export default () => { + const res = http.get('https://test-api.k6.io'); + check(res, { + 'status is 200': () => res.status === 200, + }); + sleep(1); +}; +``` + +### LocalFile + +If the script is present in the filesystem of custom runner image, it can be accessed with `localFile` option: + +```yaml +spec: + parallelism: 4 + script: + localFile: /test/test.js + runner: + image: +``` + +{{% admonition type="note" %}} + +If there is any limitation on usage of `volumeClaim` in your cluster you can use the `localFile` option, but usage of `volumeClaim` is recommneded. + +{{% /admonition %}} + + +### Multi-file tests + +In case your k6 script is split between more than one JS file, you can simply create a ConfigMap with several data entries like this: + +```bash +kubectl create configmap scenarios-test --from-file test.js --from-file utils.js +``` + +If there are too many files to specify manually, kubectl with folder might be an option as well: +```bash +kubectl create configmap scenarios-test --from-file=./test +``` + +Alternatively, you can create an archive with k6: +```bash +k6 archive test.js [args] +``` + +The above command will create an `archive.tar` in your current folder, unless `-O` option is used to change the name of the output archive. 
Then it is possible to put that archive into configmap similarly to JS script: +```bash +kubectl create configmap scenarios-test --from-file=archive.tar +``` + +In case of using an archive it must correctly set in your yaml for `TestRun` deployment: + +```yaml +# ... +spec: + script: + configMap: + name: "crocodile-stress-test" + file: "archive.tar" # <-- change here +``` + +In other words, `file` option must be the correct entrypoint for `k6 run` command. + + +## Executing tests + +Tests are executed by applying the custom resource `TestRun` to a cluster where the k6-operator is running. Additional optional properties of `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: + +```yaml +# k6-resource.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample +spec: + parallelism: 4 + script: + configMap: + name: k6-test + file: test.js + separate: false + runner: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + resources: + limits: + cpu: 200m + memory: 1000Mi + requests: + cpu: 100m + memory: 500Mi + starter: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 2000 + runAsGroup: 2000 + runAsNonRoot: true +``` + +`TestRun` CR is created with this command: + +```bash +kubectl apply -f /path/to/your/k6-resource.yml +``` + +## Cleaning up resources + +After completing a test run, you need to clean up the test jobs created. Manually this can be done by running the following command: + +```bash +kubectl delete -f /path/to/your/k6-resource.yml +``` + +Alternatively, automatic deletion of all resources can be configured with `cleanup` option: +```yaml +spec: + cleanup: "post" +``` + +With `cleanup` option set, k6-operator will remove `TestRun` CRD and all created resources once the test run is finished. 
+ +{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md index fa3eb91c6a..50edd0d8b2 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -5,6 +5,108 @@ title: Extensions # Extensions - +By default, the operator will use `grafana/k6:latest` as the container image for the test jobs. +If you want to use [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6) you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. + +For example, create a `Dockerfile` with the following content: + +```Dockerfile +# Build the k6 binary with the extension +FROM golang:1.20 as builder + +RUN go install go.k6.io/xk6/cmd/xk6@latest +# For our example, we'll add support for output of test metrics to InfluxDB v2. +# Feel free to add other extensions using the '--with ...'. +RUN xk6 build \ + --with github.com/grafana/xk6-output-influxdb@latest \ + --output /k6 + +# Use the operator's base image and override the k6 binary +FROM grafana/k6:latest +COPY --from=builder /k6 /usr/bin/k6 +``` + +Build the image based on this `Dockerfile` by executing: +```bash +docker build -t k6-extended:local . +``` + +Once the build is completed, push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. 
+We can now use it as follows: + +```yaml +# k6-resource-with-extensions.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample-with-extensions +spec: + parallelism: 4 + script: + configMap: + name: my-stress-test + file: test.js + runner: + image: k6-extended:local + env: + - name: K6_OUT + value: xk6-influxdb=http://influxdb.somewhere:8086/demo +``` + +Note that we are overriding the default image with `k6-extended:latest`, providing the test runner with environment variables used by our included extensions. + +## k6 Cloud output + +k6 supports [output to its Cloud](https://grafana.com/docs/k6//results-output/real-time/cloud) with `k6 run --out cloud script.js` command. This feature is available in k6-operator as well for subscribed users. Note that it supports only `parallelism: 20` or less. + +To use this option in k6-operator, set the argument in yaml: + +```yaml +# ... + script: + configMap: + name: "" + arguments: --out cloud +# ... +``` + +Then, if you installed operator with bundle or Helm, create a secret with the following command: + +```bash +kubectl -n k6-operator-system create secret generic my-cloud-token \ + --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" +``` + +Alternatively, if you installed operator with Makefile, you can uncomment cloud output section in `config/default/kustomization.yaml` and copy your token from the Cloud there: + +```yaml +# Uncomment this section if you need cloud output and copy-paste your token +secretGenerator: +- name: cloud-token + literals: + - token= + options: + annotations: + kubernetes.io/service-account.name: k6-operator-controller + labels: + k6cloud: token +``` + +And re-run `make deploy`. + +This is sufficient to run k6 with the Cloud output and default values of `projectID` and `name`. For non-default values, extended script options can be used like this: + +```js +export let options = { + // ... 
+ ext: { + loadimpact: { + name: 'Configured k6-operator test', + projectID: 1234567, + } + } +}; +``` {{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md index 5ac5f89b85..f6f8b6d06f 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md @@ -1,5 +1,5 @@ --- -weight: 300 +weight: 500 title: Reference _build: list: false @@ -7,6 +7,6 @@ _build: # Reference - + {{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md new file mode 100644 index 0000000000..6a8bdf3f2b --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md @@ -0,0 +1,108 @@ +--- +weight: 400 +title: Scheduling tests +--- + +# Scheduling Tests + +While the k6-operator doesn't support scheduling k6 tests directly, one can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute creation and deletion of `TestRun` object. + +Running these tests requires a little more setup than standalone test run. + +## Create a ConfigMap with k6 scripts + +This step is described in [Executing k6 script with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/). 
+ + +## Create a ConfigMap of the yaml for the `TestRun` job + + +When using `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: + +```yaml +configMapGenerator: + - name: -config + files: + - .yaml +``` + +## Create a `ServiceAccount` for the `CronJob` + +For the `CronJob` to be able to create and delete `TestRun` objects, create a service account: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: k6- +rules: + - apiGroups: + - k6.io + resources: + - testruns + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k6- +roleRef: + kind: Role + name: k6- + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: k6- + namespace: +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k6- +``` + +## Create a `CronJob` + +A `CronJob` can be defined in the following way: + +```yaml +# snapshotter.yml +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: -cron +spec: + schedule: "" + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + spec: + serviceAccount: k6 + containers: + - name: kubectl + image: bitnami/kubectl + volumeMounts: + - name: k6-yaml + mountPath: /tmp/ + command: + - /bin/bash + args: + - -c + - "kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml" + restartPolicy: OnFailure + volumes: + - name: k6-yaml + configMap: + name: -config +``` + +{{< section depth=2 >}} \ No newline at end of file From 07313803606e10066c8032997073c2c43ffad19a Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 16:15:29 -0500 Subject: [PATCH 04/23] Add a short introduction to the index page --- .../v0.50.x/set-up/set-up-distributed-k6/_index.md | 11 ++++++++++- .../set-up/set-up-distributed-k6/usage/_index.md | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git 
a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md index 82419e6d32..2b038b5e2d 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md @@ -5,6 +5,15 @@ title: Set up distributed k6 # Set up distributed k6 - +It's possible to run large load tests even when using a single node, or single machine. But, depending on your use case, you might also want to run a distributed Grafana k6 test in your own infrastructure. + +A couple of reasons why you might want to do this: + +- You run your application in Kubernetes and would like k6 to be executed in the same fashion as all your other infrastructure components. +- You want to run your tests within your private network for security and/or privacy reasons. + +[k6 operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. 
+ +This section includes the following topics: {{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md index d4e5e36a30..48ddb3b67c 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md @@ -5,6 +5,6 @@ title: Usage # Usage - +This section includes the following topics: {{< section depth=2 >}} From 835339fa8259eefc1479ab031b17d5a3272f2b98 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 16:38:40 -0500 Subject: [PATCH 05/23] Update install-k6-operator.md --- .../install-k6-operator.md | 55 +++++++++++-------- 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index 440f3af12a..1d57df98a0 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -5,24 +5,32 @@ title: Install k6-operator # Install k6-operator -## Prerequisites +This guide provides step-by-step instructions on how to install k6 operator. -The minimal prerequisite for k6-operator is a Kubernetes cluster and access to it with [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). +## Before you begin -## Deploying the operator +To install k6 operator, you'll need: -### Bundle deployment +- A Kubernetes cluster, along with access to it. +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +## Deploy the operator + +There are three different options that you can use to deploy the k6-operator. 
+ +### Deploy with bundle The easiest way to install the operator is with bundle: + ```bash curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - ``` -Bundle includes default manifests for k6-operator, including `k6-operator-system` namespace and k6-operator Deployment with latest tagged Docker image. Customizations can be made on top of this manifest as needs be, e.g. with `kustomize`. +Bundle includes default manifests for k6-operator, including a `k6-operator-system` namespace and k6-operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. -### Deployment with Helm +### Deploy with Helm -Helm releases of k6-operator are published together with other Grafana Helm charts and can be installed with the following commands: +Helm releases of k6-operator are published together with other Grafana Helm charts. You can install it with the following commands: ```bash helm repo add grafana https://grafana.github.io/helm-charts @@ -30,45 +38,48 @@ helm repo update helm install k6-operator grafana/k6-operator ``` -Passing additional configuration can be done with `values.yaml` (example can be found [here](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml)): +You can also pass additional configuration options with a `values.yaml` file: ```bash helm install k6-operator grafana/k6-operator -f values.yaml ``` -Complete list of options available for Helm can be found [here](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). +Refer to the [k6-operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. 
-### Makefile deployment +You can find a complete list of Helm options in the [k6 operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). + +### Deploy with Makefile + +In order to install the operator with a Makefile, you'll need: -In order to install the operator with Makefile, the following additional tooling must be installed: - [go](https://go.dev/doc/install) - [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) -A more manual, low-level way to install the operator is by running the command below: +A more manual, low-level way to install the k6 operator is by running the command below: ```bash make deploy ``` -This method may be more useful for development of k6-operator, depending on specifics of the setup. +This method may be more useful for development of the k6-operator, depending on specifics of the setup. -## Installing the CRD +## Install the CRD -The k6-operator includes custom resources called `TestRun`, `PrivateLoadZone` and currently also `K6`. These will be automatically installed when you do a deployment or install a bundle, but in case you want to do it yourself, you may run the command below: +The k6-operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: ```bash make install ``` -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} -`K6` CRD has been substituted with `TestRun` CRD and will be deprecated in the future. Please use `TestRun` CRD. +The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in the future. We recommend using the `TestRun` CRD. -{{% /admonition %}} +{{< /admonition >}} -## Namespaced deployment +## Deploy with custom namespace -By default, k6-operator watches `TestRun` and `PriaveLoadZone` custom resources in all namespaces. 
But it is possible to configure k6-operator to watch only a specific namespace by setting a `WATCH_NAMESPACE` environment variable for the operator's deployment: +By default, the k6-operator watches `TestRun` and `PriavteLoadZone` custom resources in all namespaces. You can also configure the k6-operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: ```yaml apiVersion: apps/v1 @@ -84,8 +95,6 @@ spec: image: ghcr.io/grafana/k6-operator:controller-v0.0.14 env: - name: WATCH_NAMESPACE - value: "some-ns" + value: 'some-ns' # ... ``` - -{{< section depth=2 >}} From 96b0ecff8c429f6a6f9f92c35bdf93c807951c68 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 16:40:59 -0500 Subject: [PATCH 06/23] Update upgrade-k6-operator.md --- .../v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md index 5d93f6777c..299649f874 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -6,5 +6,3 @@ title: Upgrade k6-operator # Upgrade k6-operator - -{{< section depth=2 >}} From 0bd07ec41ecc373af4b4c36c00e874f03625264d Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 16:41:19 -0500 Subject: [PATCH 07/23] Update troubleshooting.md --- .../v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md index 49c0b6169d..3b6ad70e50 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md +++ 
b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -6,5 +6,3 @@ title: Troubleshooting # Troubleshooting - -{{< section depth=2 >}} From 024875d00b6e0376deee4b01e62af3f3f4f64004 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 17:47:18 -0500 Subject: [PATCH 08/23] Update executing-k6-scripts-with-testrun-crd.md --- .../executing-k6-scripts-with-testrun-crd.md | 80 +++++++++---------- 1 file changed, 39 insertions(+), 41 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md index d7304cf1d9..45ba5fb977 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -1,18 +1,19 @@ --- weight: 100 -title: Executing k6 scripts with TestRun CRD +title: Run k6 scripts with TestRun CRD --- -# Executing k6 scripts with TestRun CRD +# Run k6 scripts with TestRun CRD + +This guide covers how you can configure your k6 scripts to run using the k6 operator. ## Defining test scripts -There are several ways to configure scripts in `TestRun` CRD. -#The operator utilises `ConfigMap`s and `LocalFile` to serve test scripts to the jobs. To upload your own test script, run the following command to configure through `ConfigMap`: +There are several ways to configure scripts in the `TestRun` CRD. The operator uses `ConfigMap` and `LocalFile` to serve test scripts to the jobs. 
### ConfigMap -The main way to configure script is to create a ConfigMap with the script contents: +The main way to configure a script is to create a `ConfigMap` with the script contents: ```bash kubectl create configmap my-test --from-file /path/to/my/test.js @@ -27,28 +28,28 @@ Then specify it in `TestRun`: file: test.js ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} -There is a character limit of 1048576 bytes to a single configmap. If you need to have a larger test file, you'll need to use a volumeClaim or a localFile instead +A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `LocalFile` instead. -{{% /admonition %}} +{{< /admonition >}} ### VolumeClaim -If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting file(s), you can pass it to the test like this: +If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting files, you can pass it to the test like this: ```yaml spec: script: volumeClaim: - name: "stress-test-volumeClaim" + name: 'stress-test-volumeClaim' # test.js should exist inside /test/ folder. - # All the js files and directories test.js is importing + # All the js files and directories test.js is importing # should be inside the same directory as well. - file: "test.js" + file: 'test.js' ``` -The pods will expect to find script files in `/test/` folder. If `volumeClaim` fails, it's the first place to check: the latest initializer pod does not generate any logs and when it can't find the file, it will terminate with error. So missing file may not be that obvious and it makes sense to check it is present manually. See [GH issue](https://github.com/k6-operator/issues/143) for potential improvements. +The pods will expect to find the script files in the `/test/` folder. If `volumeClaim` fails, that's the first place to check. 
The latest initializer pod doesn't generate any logs and when it can't find the file, it exits with an error. Refer to [this GitHub issue](https://github.com/grafana/k6-operator/issues/143) for potential improvements. #### Sample directory structure @@ -59,15 +60,15 @@ The pods will expect to find script files in `/test/` folder. If `volumeClaim` f │ ├── test.js ``` -In the above example, `test.js` imports a function from `stress-test.js` and these files would look like this: +In the preceding example, `test.js` imports a function from `stress-test.js` and these files would look like this: ```js // test.js -import stressTest from "./requests/stress-test.js"; +import stressTest from './requests/stress-test.js'; export const options = { vus: 50, - duration: '10s' + duration: '10s', }; export default function () { @@ -80,7 +81,6 @@ export default function () { import { sleep, check } from 'k6'; import http from 'k6/http'; - export default () => { const res = http.get('https://test-api.k6.io'); check(res, { @@ -92,7 +92,7 @@ export default () => { ### LocalFile -If the script is present in the filesystem of custom runner image, it can be accessed with `localFile` option: +If the script is present in the filesystem of a custom runner image, it can be accessed with the `localFile` option: ```yaml spec: @@ -103,53 +103,52 @@ spec: image: ``` -{{% admonition type="note" %}} - -If there is any limitation on usage of `volumeClaim` in your cluster you can use the `localFile` option, but usage of `volumeClaim` is recommneded. +{{< admonition type="note" >}} -{{% /admonition %}} +If there is any limitation on the usage of `volumeClaim` in your cluster, you can use the `localFile` option. We recommend using `volumeClaim` if possible. 
+{{< /admonition >}} ### Multi-file tests -In case your k6 script is split between more than one JS file, you can simply create a ConfigMap with several data entries like this: +In case your k6 script is split between multiple JavaScript files, you can create a `ConfigMap` with several data entries like this: ```bash kubectl create configmap scenarios-test --from-file test.js --from-file utils.js ``` -If there are too many files to specify manually, kubectl with folder might be an option as well: +If there are too many files to specify manually, using `kubectl` with a folder might be an option as well: + ```bash kubectl create configmap scenarios-test --from-file=./test ``` Alternatively, you can create an archive with k6: + ```bash k6 archive test.js [args] ``` -The above command will create an `archive.tar` in your current folder, unless `-O` option is used to change the name of the output archive. Then it is possible to put that archive into configmap similarly to JS script: +The `k6 archive` command creates an `archive.tar` in your current folder. You can then use that file in the `configmap`, similarly to a JavaScript script: + ```bash kubectl create configmap scenarios-test --from-file=archive.tar ``` -In case of using an archive it must correctly set in your yaml for `TestRun` deployment: +If you use an archive, you must edit your YAML file for the `TestRun` deployment so that the `file` option is set to the correct entrypoint for the `k6 run` command: ```yaml # ... spec: script: configMap: - name: "crocodile-stress-test" - file: "archive.tar" # <-- change here + name: 'crocodile-stress-test' + file: 'archive.tar' # <-- change here ``` -In other words, `file` option must be the correct entrypoint for `k6 run` command. - - -## Executing tests +## Run tests -Tests are executed by applying the custom resource `TestRun` to a cluster where the k6-operator is running. 
Additional optional properties of `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: +Tests are executed by applying the custom resource `TestRun` to a cluster where the k6-operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: ```yaml # k6-resource.yml @@ -196,26 +195,25 @@ spec: runAsNonRoot: true ``` -`TestRun` CR is created with this command: +A `TestRun` CR is created with this command: ```bash kubectl apply -f /path/to/your/k6-resource.yml ``` -## Cleaning up resources +## Clean up resources -After completing a test run, you need to clean up the test jobs created. Manually this can be done by running the following command: +After completing a test run, you need to clean up the test jobs that were created: ```bash kubectl delete -f /path/to/your/k6-resource.yml ``` -Alternatively, automatic deletion of all resources can be configured with `cleanup` option: +Alternatively, you can configure the automatic deletion of all resources with the `cleanup` option: + ```yaml spec: - cleanup: "post" + cleanup: 'post' ``` -With `cleanup` option set, k6-operator will remove `TestRun` CRD and all created resources once the test run is finished. - -{{< section depth=2 >}} +With the `cleanup` option set, k6-operator removes the `TestRun` CRD and all created resources once the test run ends. 
From fc15830e32a1ef3e675b67667bb00774cd8ab802 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 17:57:22 -0500 Subject: [PATCH 09/23] Update extensions.md --- .../set-up-distributed-k6/usage/extensions.md | 70 ++++++++----------- 1 file changed, 31 insertions(+), 39 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md index 50edd0d8b2..2baf61e1d2 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -3,18 +3,20 @@ weight: 200 title: Extensions --- -# Extensions +# Use k6-operator with k6 extensions -By default, the operator will use `grafana/k6:latest` as the container image for the test jobs. -If you want to use [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6) you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. +By default, the k6 operator uses `grafana/k6:latest`, or the latest version of k6, as the container image for the test jobs. -For example, create a `Dockerfile` with the following content: +If you want to use k6 [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6), you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. + +For example, this is a `Dockerfile` that builds a k6 binary with the `xk6-output-influxdb` extension: ```Dockerfile # Build the k6 binary with the extension FROM golang:1.20 as builder RUN go install go.k6.io/xk6/cmd/xk6@latest + # For our example, we'll add support for output of test metrics to InfluxDB v2. # Feel free to add other extensions using the '--with ...'. 
RUN xk6 build \ @@ -26,13 +28,15 @@ FROM grafana/k6:latest COPY --from=builder /k6 /usr/bin/k6 ``` -Build the image based on this `Dockerfile` by executing: +You can build the image based on this `Dockerfile` by executing: + ```bash docker build -t k6-extended:local . ``` -Once the build is completed, push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. -We can now use it as follows: +After the build completes, you can push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. + +You can then use that image as follows: ```yaml # k6-resource-with-extensions.yml @@ -54,20 +58,20 @@ spec: value: xk6-influxdb=http://influxdb.somewhere:8086/demo ``` -Note that we are overriding the default image with `k6-extended:latest`, providing the test runner with environment variables used by our included extensions. +Note that this examples overrides the default image with `k6-extended:latest`, and it includes environment variables that are required by the `xk6-output-influxdb` extension. -## k6 Cloud output +## Output to Grafana Cloud k6 -k6 supports [output to its Cloud](https://grafana.com/docs/k6//results-output/real-time/cloud) with `k6 run --out cloud script.js` command. This feature is available in k6-operator as well for subscribed users. Note that it supports only `parallelism: 20` or less. +With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in k6-operator, if you have a Grafana Cloud account. Note that it supports only `parallelism: 20` or less. -To use this option in k6-operator, set the argument in yaml: +To use this option in k6-operator, set the argument in YAML: ```yaml # ... - script: - configMap: - name: "" - arguments: --out cloud +script: + configMap: + name: '' +arguments: --out cloud # ... 
``` @@ -78,35 +82,23 @@ kubectl -n k6-operator-system create secret generic my-cloud-token \ --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" ``` -Alternatively, if you installed operator with Makefile, you can uncomment cloud output section in `config/default/kustomization.yaml` and copy your token from the Cloud there: +Alternatively, if you installed operator with Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: ```yaml # Uncomment this section if you need cloud output and copy-paste your token secretGenerator: -- name: cloud-token - literals: - - token= - options: - annotations: - kubernetes.io/service-account.name: k6-operator-controller - labels: - k6cloud: token + - name: cloud-token + literals: + - token= + options: + annotations: + kubernetes.io/service-account.name: k6-operator-controller + labels: + k6cloud: token ``` -And re-run `make deploy`. +After updating the file, run `make deploy`. -This is sufficient to run k6 with the Cloud output and default values of `projectID` and `name`. For non-default values, extended script options can be used like this: - -```js -export let options = { - // ... - ext: { - loadimpact: { - name: 'Configured k6-operator test', - projectID: 1234567, - } - } -}; -``` +After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. -{{< section depth=2 >}} +Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. 
From af61bb48318abcffde47109621afd41597e7662b Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 18:05:35 -0500 Subject: [PATCH 10/23] Update extensions.md --- .../v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md index 2baf61e1d2..d3520a81a7 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -1,6 +1,6 @@ --- weight: 200 -title: Extensions +title: Use k6-operator with k6 extensions --- # Use k6-operator with k6 extensions From 0bd92fb9e00f8a2cd8e99968579a2858c164f4c0 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 18:05:38 -0500 Subject: [PATCH 11/23] Update common-options.md --- .../usage/common-options.md | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md index c03f48eba1..8be3aef9c5 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md @@ -3,16 +3,15 @@ weight: 300 title: Common options --- - - # Common options -The only options that must be defined as part of `TestRun` CRD spec are `script` and `parallelism`. But there are many others; here are some of the most common. + + +The only options that are required as part of the `TestRun` CRD spec are `script` and `parallelism`. This guide covers some of the most common options. ## Parallelism -`parallelism` defines how many instances of k6 runneres you want to create. Each instance will be assigned an equal execution segment. 
For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6-operator will -create four k6 jobs, each running 50 VUs to achieve the desired VU count. +`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6-operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. ## Separate @@ -21,7 +20,7 @@ test with a really high VU count and want to make sure the resources of each nod ## Service account -If you want to use a custom Service Account you'll need to pass it into both the starter and runner object: +If you want to use a custom Service Account you'll need to pass it into both the starter and the runner object: ```yaml apiVersion: k6.io/v1alpha1 @@ -31,7 +30,7 @@ metadata: spec: script: configMap: - name: "" + name: '' runner: serviceAccountName: starter: @@ -42,17 +41,14 @@ spec: Defines options for the test runner pods. The non-exhaustive list includes: -* passing resource limits and requests -* passing in labels and annotations -* passing in affinity and anti-affinity -* passing in a custom image +- Passing resource limits and requests. +- Passing in labels and annotations. +- Passing in affinity and anti-affinity. +- Passing in a custom image. ## Starter Defines options for the starter pod. The non-exhaustive list includes: -* passing in custom image -* passing in labels and annotations - - -{{< section depth=2 >}} +- Passing in a custom image. +- Passing in labels and annotations. 
From 66bd18848109318e450b5c366209059069e9ae64 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 3 Jun 2024 18:09:32 -0500 Subject: [PATCH 12/23] Update scheduling-tests.md --- .../usage/scheduling-tests.md | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md index 6a8bdf3f2b..bffbe9d993 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md @@ -1,23 +1,23 @@ --- weight: 400 -title: Scheduling tests +title: Schedule k6 tests --- -# Scheduling Tests +# Schedule k6 tests -While the k6-operator doesn't support scheduling k6 tests directly, one can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute creation and deletion of `TestRun` object. +While the k6-operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. -Running these tests requires a little more setup than standalone test run. +Running these tests requires a little more setup than a standalone test run. -## Create a ConfigMap with k6 scripts +## Create a `ConfigMap` with k6 scripts -This step is described in [Executing k6 script with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/). +Refer to [Run k6 scripts with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/) for details on how to create a `ConfigMap` with k6 scripts. 
+## Create a ConfigMap of the YAML file for the `TestRun` job -## Create a ConfigMap of the yaml for the `TestRun` job -When using `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: +When using the `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: ```yaml configMapGenerator: @@ -71,7 +71,7 @@ metadata: ## Create a `CronJob` -A `CronJob` can be defined in the following way: +This is an example of how to define a `CronJob` in a YAML file: ```yaml # snapshotter.yml @@ -80,7 +80,7 @@ kind: CronJob metadata: name: -cron spec: - schedule: "" + schedule: '' concurrencyPolicy: Forbid jobTemplate: spec: @@ -97,12 +97,10 @@ spec: - /bin/bash args: - -c - - "kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml" + - 'kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml' restartPolicy: OnFailure volumes: - name: k6-yaml configMap: name: -config ``` - -{{< section depth=2 >}} \ No newline at end of file From 5c71c70a5d421012300117a53316665725aec28f Mon Sep 17 00:00:00 2001 From: Olha Yevtushenko Date: Fri, 14 Jun 2024 13:42:06 +0300 Subject: [PATCH 13/23] k6-operator: fix typos --- .../set-up/set-up-distributed-k6/install-k6-operator.md | 2 +- .../usage/executing-k6-scripts-with-testrun-crd.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index 1d57df98a0..b4febacf2b 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -79,7 +79,7 @@ The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in th ## Deploy with custom namespace -By default, the k6-operator watches `TestRun` and `PriavteLoadZone` custom resources in all namespaces. 
You can also configure the k6-operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: +By default, the k6-operator watches `TestRun` and `PrivateLoadZone` custom resources in all namespaces. You can also configure the k6-operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: ```yaml apiVersion: apps/v1 diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md index 45ba5fb977..16eceffa41 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -9,7 +9,7 @@ This guide covers how you can configure your k6 scripts to run using the k6 oper ## Defining test scripts -There are several ways to configure scripts in the `TestRun` CRD. The operator uses `ConfigMap` and `LocalFile` to serve test scripts to the jobs. +There are several ways to configure scripts in the `TestRun` CRD. The operator uses `configMap`, `volumeClaim` and `localFile` to serve test scripts to the jobs. ### ConfigMap @@ -30,7 +30,7 @@ Then specify it in `TestRun`: {{< admonition type="note" >}} -A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `LocalFile` instead. +A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `localFile` instead. 
{{< /admonition >}} From 8a7e3117cb942e899944934b00d8205672931702 Mon Sep 17 00:00:00 2001 From: Olha Yevtushenko Date: Fri, 14 Jun 2024 13:48:04 +0300 Subject: [PATCH 14/23] k6-operator: add content for troubleshooting.md --- .../set-up-distributed-k6/troubleshooting.md | 245 +++++++++++++++++- 1 file changed, 244 insertions(+), 1 deletion(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md index 3b6ad70e50..14fa34b9f4 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -5,4 +5,247 @@ title: Troubleshooting # Troubleshooting - +Just as any Kubernetes application, k6-operator can get into error scenarios which are sometimes a result of a misconfigured test or setup. This document is meant to help troubleshoot such scenarios quicker. + +## Common tricks + +### Preparation + +{{% admonition type="warning" %}} + +Before trying to run a script with k6-operator, be it via `TestRun` or via `PrivateLoadZone`, always run it locally: + +```bash +k6 run script.js +``` + +{{% /admonition %}} + +If there are going to be environment variables or CLI options, pass them in as well: +```bash +MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar +``` + +This ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally will make it obvious if the configured options are doing what is expected. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. + +### `TestRun` deployment + +#### The pods + +In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, there are certain repeating patterns: + +1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, `n` runners. +1. 
If any of these Jobs did not result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here:
+   ```bash
+   kubectl get jobs -A
+   kubectl describe job mytest-initializer
+   ```
+1. If one of the Pods was deployed but finished with `Error`, it makes sense to check its logs:
+   ```bash
+   kubectl logs mytest-initializer-xxxxx
+   ```
+
+If the Pods seem to be working but not producing an expected result and there's not enough information in the logs of the Pods, it might make sense to turn on k6 [verbose option](https://grafana.com/docs/k6/latest/using-k6/k6-options/#options) in `TestRun` spec:
+
+```yaml
+apiVersion: k6.io/v1alpha1
+kind: TestRun
+metadata:
+  name: k6-sample
+spec:
+  parallelism: 2
+  script:
+    configMap:
+      name: "test"
+      file: "test.js"
+  arguments: --verbose
+```
+
+#### k6-operator
+
+Another source of info is k6-operator itself. It is deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from [previous subsection](#the-pods) usually contain enough information to glean correct diagnosis. With the standard deployment, the logs of k6-operator can be checked with:
+
+```bash
+kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx
+```
+
+#### Inspect `TestRun` resource
+
+Once a `TestRun` CR is deployed, it can be inspected the same way as any other resource:
+
+```bash
+kubectl describe testrun my-testrun
+```
+
+Firstly, check if the spec is as expected. 
Then, see the current status: + +```yaml +Status: + Conditions: + Last Transition Time: 2024-01-17T10:30:01Z + Message: + Reason: CloudTestRunFalse + Status: False + Type: CloudTestRun + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: TestRunPreparation + Status: Unknown + Type: TestRunRunning + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudTestRunAbortedFalse + Status: False + Type: CloudTestRunAborted + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudPLZTestRunFalse + Status: False + Type: CloudPLZTestRun + Stage: error +``` + +If `Stage` is equal to `error` then it definitely makes sense to check the logs of k6-operator. + +Conditions can be used as a source of info as well, but it is a more advanced troubleshooting option that should be used if previous suggestions are insufficient. Note, that conditions that start with `Cloud` prefix matter only in the setting of k6 Cloud test runs, i.e. cloud output and PLZ test runs. + +### `PrivateLoadZone` deployment + +If `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. + +Firstly, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. + +Unlike `TestRun` deployment, when `PrivateLoadZone` is first created, there are no additional resources deployed. So the only source for troubleshooting are the logs of k6-operator. See the [above subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to what is wrong. See [below](#privateloadzone-subscription-error) for some potential errors explained in more detail. 
+ +### Running tests in `PrivateLoadZone` + +Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by k6-operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If such test is misbehaving (errors out, does not produce expected result, etc.), then one should check: +1) if there are any messages in GCk6 UI +2) if there are any messages in the output of `k6 cloud` command +3) the resources and their logs, the same way as with [standalone `TestRun` deployment](#testrun-deployment) + +## Common scenarios + +### Where are my env vars... + +Some tricky cases with environment variables are described in [this doc](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md). + +### Tags are not working?! + +Currently, tags are a rather common source of frustration in usage of k6-operator. For example: + +```yaml + arguments: --tag product_id="Test A" + # or + arguments: --tag foo=\"bar\" +``` + +Passing the above leads to parsing errors which can be seen in the logs of either initializer or runner Pod, e.g.: +```bash +time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" +``` + +This is a standard problem with escaping the characters, and there's even an [issue](https://github.com/grafana/k6-operator/issues/211) that can be upvoted. + +### Initializer logs an error but it's not about tags + +Often, this happens because of lack of attention to the [preparation](#preparation) step. One more command that can be tried here is to run the following: + +```bash +k6 inspect --execution-requirements script.js +``` + +This command is a shortened version of what initializer Pod is executing. If the above command produces an error, it is definitely a problem with the script and should be first solved outside of k6-operator. 
The error itself may contain a hint to what is wrong, for instance a syntax error.
+
+If standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. Recommendations here:
+- read carefully the output in initializer Pod: is it logged by k6 process or by something else?
+  - :information_source: k6-operator expects initializer logs to contain only the output of `k6 inspect`. If there's any other log line present, then k6-operator will fail to parse it and the test will not start. ([issue](https://github.com/grafana/k6-operator/issues/193))
+- check events in initializer Job and Pod as they may contain another hint about what is wrong
+
+### Non-existent ServiceAccount
+
+ServiceAccount can be defined as `serviceAccountName` and `runner.serviceAccountName` in PrivateLoadZone and TestRun CRD respectively. If the specified ServiceAccount does not exist, k6-operator will successfully create Jobs but corresponding Pods will fail to be deployed, and k6-operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job:
+
+```bash
+kubectl describe job plz-test-xxxxxx-initializer
+...
+Events:
+  Warning  FailedCreate  57s (x4 over 2m7s)  job-controller  Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found
+```
+
+Currently, k6-operator does not try to analyze such scenarios on its own but we have an [issue](https://github.com/grafana/k6-operator/issues/260) for improvement.
+
+How to fix: incorrect `serviceAccountName` must be corrected and TestRun or PrivateLoadZone resource must be re-deployed.
+
+### Non-existent `nodeSelector`
+
+`nodeSelector` can be defined as `nodeSelector` and `runner.nodeSelector` in PrivateLoadZone and TestRun CRD respectively. 
+ +This case is very similar to [ServiceAccount one](#non-existent-serviceaccount): the Pod creation will fail, only the error would be somewhat different: + +```bash +kubectl describe pod plz-test-xxxxxx-initializer-xxxxx +... +Events: + Warning FailedScheduling 48s (x5 over 4m6s) default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. +``` + +How to fix: incorrect `nodeSelector` must be corrected and TestRun or PrivateLoadZone resource must be re-deployed. + +### Insufficient resources + +A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There is a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on maximum number of nodes and cannot provide the required resources on time or at all. + +This case is somewhat similar to the previous two: the k6-operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it is possible to fix the issue with insufficient resources on-the-fly, e.g. by adding more nodes, k6-operator will attempt to continue executing a test run. + +### OOM of a runner Pod + +If there's at least one runner Pod that OOM-ed, the whole test will be [stuck](https://github.com/grafana/k6-operator/issues/251) and will have to be deleted manually: + +```bash +kubectl -f my-test.yaml delete +# or +kubectl delete testrun my-test +``` + +In case of OOM, it makes sense to review k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. 
Then, set `spec.runner.resources` in TestRun CRD or `spec.resources` in PrivateLoadZone CRD accordingly. + +### PrivateLoadZone: subscription error + +If there's something off with your k6 Cloud subscription, there will be a 400 error in the logs with the message detailing the problem. For example: + +```bash +"Received error `(400) You have reached the maximum Number of private load zones your organization is allowed to have. Please contact support if you want to create more.`. Message from server ``" +``` + +The most likely course of action in this case is either to check your organization settings in GCk6 or to contact k6 Cloud support. + +### PrivateLoadZone: wrong token + +There can be two major problems with the token. + +1. If token was not created or was created in a wrong location, there will be the following in the logs: + ```bash + Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} + ``` + +2. If token contains a corrupted value or it's not an organizational token, there will be the following error in the logs: + ```bash + "Received error `(403) Authentication token incorrect or expired`. Message from server ``" + ``` + +### PrivateLoadZone: networking setup + +If you see any dial or connection errors in the logs of k6-operator, it makes sense to double-check the networking setup. For PrivateLoadZone to operate, outbound traffic to k6 Cloud [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). 
The basic way to check the reachability of k6 Cloud endpoints: + +```bash +kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml +kubectl exec -it dnsutils -- nslookup ingest.k6.io +kubectl exec -it dnsutils -- nslookup api.k6.io +``` + +For more resources on troubleshooting networking, see Kubernetes [official docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). + +### PrivateLoadZone: insufficient resources + +The problem is similar to [insufficient resources in general case](#insufficient-resources). But when running a PrivateLoadZone test, k6-operator will wait only for a timeout period (10 minutes at the moment). When the timeout period is up, the test will be aborted by k6 Cloud and marked as such both in PrivateLoadZone and in GCk6. In other words, there is a time limit to fix this issue without restarting the test run. \ No newline at end of file From 9d046edc8b4712151a77490c4d8f71b66ffb39b8 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 15:57:41 -0500 Subject: [PATCH 15/23] chore: replaces instances of k6-operator with k6 Operator --- .../set-up/set-up-distributed-k6/_index.md | 2 +- .../install-k6-operator.md | 26 +++--- .../set-up-distributed-k6/troubleshooting.md | 84 ++++++++++--------- .../upgrade-k6-operator.md | 4 +- .../usage/common-options.md | 2 +- .../executing-k6-scripts-with-testrun-crd.md | 4 +- .../set-up-distributed-k6/usage/extensions.md | 47 +---------- .../usage/scheduling-tests.md | 2 +- 8 files changed, 67 insertions(+), 104 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md index 2b038b5e2d..06687090b0 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md @@ -12,7 +12,7 @@ A couple of reasons why you might want to do this: - You run your application in Kubernetes and would like k6 to 
be executed in the same fashion as all your other infrastructure components. - You want to run your tests within your private network for security and/or privacy reasons. -[k6 operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. +[k6 Operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. This section includes the following topics: diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index b4febacf2b..82cc822a04 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -1,22 +1,22 @@ --- weight: 100 -title: Install k6-operator +title: Install k6 Operator --- -# Install k6-operator +# Install k6 Operator -This guide provides step-by-step instructions on how to install k6 operator. +This guide provides step-by-step instructions on how to install k6 Operator. ## Before you begin -To install k6 operator, you'll need: +To install k6 Operator, you'll need: - A Kubernetes cluster, along with access to it. - [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). ## Deploy the operator -There are three different options that you can use to deploy the k6-operator. +There are three different options that you can use to deploy the k6 Operator. ### Deploy with bundle @@ -26,11 +26,11 @@ The easiest way to install the operator is with bundle: curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - ``` -Bundle includes default manifests for k6-operator, including a `k6-operator-system` namespace and k6-operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. 
+Bundle includes default manifests for k6 Operator, including a `k6-operator-system` namespace and k6 Operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. ### Deploy with Helm -Helm releases of k6-operator are published together with other Grafana Helm charts. You can install it with the following commands: +Helm releases of k6 Operator are published together with other Grafana Helm charts. You can install it with the following commands: ```bash helm repo add grafana https://grafana.github.io/helm-charts @@ -44,9 +44,9 @@ You can also pass additional configuration options with a `values.yaml` file: helm install k6-operator grafana/k6-operator -f values.yaml ``` -Refer to the [k6-operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. +Refer to the [k6 Operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. -You can find a complete list of Helm options in the [k6 operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). +You can find a complete list of Helm options in the [k6 Operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). ### Deploy with Makefile @@ -61,11 +61,11 @@ A more manual, low-level way to install the k6 operator is by running the comman make deploy ``` -This method may be more useful for development of the k6-operator, depending on specifics of the setup. +This method may be more useful for development of the k6 Operator, depending on specifics of the setup. ## Install the CRD -The k6-operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. 
They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: +The k6 Operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: ```bash make install @@ -77,9 +77,9 @@ The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in th {{< /admonition >}} -## Deploy with custom namespace +## Namespaced deployment -By default, the k6-operator watches `TestRun` and `PrivateLoadZone` custom resources in all namespaces. You can also configure the k6-operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: +By default, the k6 Operator watches `TestRun` and `PrivateLoadZone` custom resources in all namespaces. You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: ```yaml apiVersion: apps/v1 diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md index 14fa34b9f4..27aef9c6b4 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -5,7 +5,7 @@ title: Troubleshooting # Troubleshooting -Just as any Kubernetes application, k6-operator can get into error scenarios which are sometimes a result of a misconfigured test or setup. This document is meant to help troubleshoot such scenarios quicker. +Just as any Kubernetes application, k6 Operator can get into error scenarios which are sometimes a result of a misconfigured test or setup. This document is meant to help troubleshoot such scenarios quicker. 
## Common tricks @@ -13,7 +13,7 @@ Just as any Kubernetes application, k6-operator can get into error scenarios whi {{% admonition type="warning" %}} -Before trying to run a script with k6-operator, be it via `TestRun` or via `PrivateLoadZone`, always run it locally: +Before trying to run a script with k6 Operator, be it via `TestRun` or via `PrivateLoadZone`, always run it locally: ```bash k6 run script.js @@ -22,6 +22,7 @@ k6 run script.js {{% /admonition %}} If there are going to be environment variables or CLI options, pass them in as well: + ```bash MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar ``` @@ -36,14 +37,14 @@ In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, th 1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, `n` runners. 1. If any of these Jobs did not result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: - ```bash - kubectl get jobs -A - kubectl describe job mytest-initializer - ``` + ```bash + kubectl get jobs -A + kubectl describe job mytest-initializer + ``` 1. If one of the Pods was deployed but finished with `Error`, it makes sense to check its logs: - ```bash - kubectl logs mytest-initializer-xxxxx - ``` + ```bash + kubectl logs mytest-initializer-xxxxx + ``` If the Pods seem to be working but not producing an expected result and there's not enough information in the logs of the Pods, it might make sense to turn on k6 [verbose option](https://grafana.com/docs/k6/latest/using-k6/k6-options/#options) in `TestRun` spec: @@ -56,14 +57,14 @@ spec: parallelism: 2 script: configMap: - name: "test" - file: "test.js" + name: 'test' + file: 'test.js' arguments: --verbose ``` -#### k6-operator +#### k6 Operator -Another source of info is k6-operator itself. 
It is deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from [previous subsection](#the-pods) usually contain enough information to glean correct diagnosis. With the standard deployment, the logs of k6-operator can be checked with: +Another source of info is k6 Operator itself. It is deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from [previous subsection](#the-pods) usually contain enough information to glean correct diagnosis. With the standard deployment, the logs of k6 Operator can be checked with: ```bash kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx @@ -83,29 +84,29 @@ Firstly, check if the spec is as expected. Then, see the current status: Status: Conditions: Last Transition Time: 2024-01-17T10:30:01Z - Message: + Message: Reason: CloudTestRunFalse Status: False Type: CloudTestRun Last Transition Time: 2024-01-17T10:29:58Z - Message: + Message: Reason: TestRunPreparation Status: Unknown Type: TestRunRunning Last Transition Time: 2024-01-17T10:29:58Z - Message: + Message: Reason: CloudTestRunAbortedFalse Status: False Type: CloudTestRunAborted Last Transition Time: 2024-01-17T10:29:58Z - Message: + Message: Reason: CloudPLZTestRunFalse Status: False Type: CloudPLZTestRun Stage: error ``` -If `Stage` is equal to `error` then it definitely makes sense to check the logs of k6-operator. +If `Stage` is equal to `error` then it definitely makes sense to check the logs of k6 Operator. Conditions can be used as a source of info as well, but it is a more advanced troubleshooting option that should be used if previous suggestions are insufficient. Note, that conditions that start with `Cloud` prefix matter only in the setting of k6 Cloud test runs, i.e. cloud output and PLZ test runs. 
@@ -115,14 +116,15 @@ If `PrivateLoadZone` CR was successfully created in Kubernetes, it should become Firstly, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. -Unlike `TestRun` deployment, when `PrivateLoadZone` is first created, there are no additional resources deployed. So the only source for troubleshooting are the logs of k6-operator. See the [above subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to what is wrong. See [below](#privateloadzone-subscription-error) for some potential errors explained in more detail. +Unlike `TestRun` deployment, when `PrivateLoadZone` is first created, there are no additional resources deployed. So the only source for troubleshooting are the logs of k6 Operator. See the [above subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to what is wrong. See [below](#privateloadzone-subscription-error) for some potential errors explained in more detail. ### Running tests in `PrivateLoadZone` -Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by k6-operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If such test is misbehaving (errors out, does not produce expected result, etc.), then one should check: -1) if there are any messages in GCk6 UI -2) if there are any messages in the output of `k6 cloud` command -3) the resources and their logs, the same way as with [standalone `TestRun` deployment](#testrun-deployment) +Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. 
If such test is misbehaving (errors out, does not produce expected result, etc.), then one should check: + +1. if there are any messages in GCk6 UI +2. if there are any messages in the output of `k6 cloud` command +3. the resources and their logs, the same way as with [standalone `TestRun` deployment](#testrun-deployment) ## Common scenarios @@ -132,7 +134,7 @@ Some tricky cases with environment variables are described in [this doc](https:/ ### Tags are not working?! -Currently, tags are a rather common source of frustration in usage of k6-operator. For example: +Currently, tags are a rather common source of frustration in usage of k6 Operator. For example: ```yaml arguments: --tag product_id="Test A" @@ -141,6 +143,7 @@ Currently, tags are a rather common source of frustration in usage of k6-operato ``` Passing the above leads to parsing errors which can be seen in the logs of either initializer or runner Pod, e.g.: + ```bash time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" ``` @@ -155,16 +158,17 @@ Often, this happens because of lack of attention to the [preparation](#preparati k6 inspect --execution-requirements script.js ``` -This command is a shortened version of what initializer Pod is executing. If the above command produces an error, it is definitely a problem with the script and should be first solved outside of k6-operator. The error itself may contain a hint to what is wrong, for instance a syntax error. +This command is a shortened version of what initializer Pod is executing. If the above command produces an error, it is definitely a problem with the script and should be first solved outside of k6 Operator. The error itself may contain a hint to what is wrong, for instance a syntax error. 
If standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. Recommendations here:
+
- read carefully the output in initializer Pod: is it logged by k6 process or by something else?
-  - :information_source: k6-operator expects initializer logs to contain only the output of `k6 inspect`. If there's any other log line present, then k6-operator will fail to parse it and the test will not start. ([issue](https://github.com/grafana/k6-operator/issues/193))
+  - :information_source: k6 Operator expects initializer logs to contain only the output of `k6 inspect`. If there's any other log line present, then k6 Operator will fail to parse it and the test will not start. ([issue](https://github.com/grafana/k6-operator/issues/193))
- check events in initializer Job and Pod as they may contain another hint about what is wrong

### Non-existent ServiceAccount

-ServiceAccount can be defined as `serviceAccountName` and `runner.serviceAccountName` in PrivateLoadZone and TestRun CRD respectfully. If the specified ServiceAccount does not exist, k6-operator will successfully create Jobs but corresponding Pods will fail to be deployed, and k6-operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job:
+ServiceAccount can be defined as `serviceAccountName` and `runner.serviceAccountName` in PrivateLoadZone and TestRun CRD respectively. If the specified ServiceAccount does not exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and k6 Operator will wait indefinitely for Pods to be `Ready`.
This error can be best seen in the events of the Job: ```bash kubectl describe job plz-test-xxxxxx-initializer @@ -173,13 +177,13 @@ Events: Warning FailedCreate 57s (x4 over 2m7s) job-controller Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found ``` -Currently, k6-operator does not try to analyze such scenarios on its own but we have an [issue](https://github.com/grafana/k6-operator/issues/260) for improvement. +Currently, k6 Operator does not try to analyze such scenarios on its own but we have an [issue](https://github.com/grafana/k6-operator/issues/260) for improvement. How to fix: incorrect `serviceAccountName` must be corrected and TestRun or PrivateLoadZone resource must be re-deployed. ### Non-existent `nodeSelector` -`nodeSelector` can be defined as `nodeSelector` and `runner.nodeSelector` in PrivateLoadZone and TestRun CRD respectfully. +`nodeSelector` can be defined as `nodeSelector` and `runner.nodeSelector` in PrivateLoadZone and TestRun CRD respectfully. This case is very similar to [ServiceAccount one](#non-existent-serviceaccount): the Pod creation will fail, only the error would be somewhat different: @@ -196,7 +200,7 @@ How to fix: incorrect `nodeSelector` must be corrected and TestRun or PrivateLoa A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There is a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on maximum number of nodes and cannot provide the required resources on time or at all. 
-This case is somewhat similar to the previous two: the k6-operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it is possible to fix the issue with insufficient resources on-the-fly, e.g. by adding more nodes, k6-operator will attempt to continue executing a test run. +This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it is possible to fix the issue with insufficient resources on-the-fly, e.g. by adding more nodes, k6 Operator will attempt to continue executing a test run. ### OOM of a runner Pod @@ -206,7 +210,7 @@ If there's at least one runner Pod that OOM-ed, the whole test will be [stuck](h kubectl -f my-test.yaml delete # or kubectl delete testrun my-test -``` +``` In case of OOM, it makes sense to review k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set `spec.runner.resources` in TestRun CRD or `spec.resources` in PrivateLoadZone CRD accordingly. @@ -225,18 +229,20 @@ The most likely course of action in this case is either to check your organizati There can be two major problems with the token. 1. If token was not created or was created in a wrong location, there will be the following in the logs: - ```bash - Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} - ``` + +```bash +Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} +``` 2. 
If token contains a corrupted value or it's not an organizational token, there will be the following error in the logs: - ```bash - "Received error `(403) Authentication token incorrect or expired`. Message from server ``" - ``` + +```bash +"Received error `(403) Authentication token incorrect or expired`. Message from server ``" +``` ### PrivateLoadZone: networking setup -If you see any dial or connection errors in the logs of k6-operator, it makes sense to double-check the networking setup. For PrivateLoadZone to operate, outbound traffic to k6 Cloud [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). The basic way to check the reachability of k6 Cloud endpoints: +If you see any dial or connection errors in the logs of k6 Operator, it makes sense to double-check the networking setup. For PrivateLoadZone to operate, outbound traffic to k6 Cloud [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). The basic way to check the reachability of k6 Cloud endpoints: ```bash kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml @@ -248,4 +254,4 @@ For more resources on troubleshooting networking, see Kubernetes [official docs] ### PrivateLoadZone: insufficient resources -The problem is similar to [insufficient resources in general case](#insufficient-resources). But when running a PrivateLoadZone test, k6-operator will wait only for a timeout period (10 minutes at the moment). When the timeout period is up, the test will be aborted by k6 Cloud and marked as such both in PrivateLoadZone and in GCk6. In other words, there is a time limit to fix this issue without restarting the test run. \ No newline at end of file +The problem is similar to [insufficient resources in general case](#insufficient-resources). But when running a PrivateLoadZone test, k6 Operator will wait only for a timeout period (10 minutes at the moment). 
When the timeout period is up, the test will be aborted by k6 Cloud and marked as such both in PrivateLoadZone and in GCk6. In other words, there is a time limit to fix this issue without restarting the test run. diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md index 299649f874..cb47c83b3e 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -1,8 +1,8 @@ --- weight: 200 -title: Upgrade k6-operator +title: Upgrade k6 Operator --- -# Upgrade k6-operator +# Upgrade k6 Operator diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md index 8be3aef9c5..fe92f5c9da 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md @@ -11,7 +11,7 @@ The only options that are required as part of the `TestRun` CRD spec are `script ## Parallelism -`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6-operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. +`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6 Operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. 
## Separate diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md index 16eceffa41..d6bbcb7523 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -148,7 +148,7 @@ spec: ## Run tests -Tests are executed by applying the custom resource `TestRun` to a cluster where the k6-operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: +Tests are executed by applying the custom resource `TestRun` to a cluster where the k6 Operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: ```yaml # k6-resource.yml @@ -216,4 +216,4 @@ spec: cleanup: 'post' ``` -With the `cleanup` option set, k6-operator removes the `TestRun` CRD and all created resources once the test run ends. +With the `cleanup` option set, k6 Operator removes the `TestRun` CRD and all created resources once the test run ends. diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md index d3520a81a7..8abaabc920 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -1,9 +1,9 @@ --- weight: 200 -title: Use k6-operator with k6 extensions +title: Use k6 Operator with k6 extensions --- -# Use k6-operator with k6 extensions +# Use k6 Operator with k6 extensions By default, the k6 operator uses `grafana/k6:latest`, or the latest version of k6, as the container image for the test jobs. 
@@ -59,46 +59,3 @@ spec: ``` Note that this examples overrides the default image with `k6-extended:latest`, and it includes environment variables that are required by the `xk6-output-influxdb` extension. - -## Output to Grafana Cloud k6 - -With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in k6-operator, if you have a Grafana Cloud account. Note that it supports only `parallelism: 20` or less. - -To use this option in k6-operator, set the argument in YAML: - -```yaml -# ... -script: - configMap: - name: '' -arguments: --out cloud -# ... -``` - -Then, if you installed operator with bundle or Helm, create a secret with the following command: - -```bash -kubectl -n k6-operator-system create secret generic my-cloud-token \ - --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" -``` - -Alternatively, if you installed operator with Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: - -```yaml -# Uncomment this section if you need cloud output and copy-paste your token -secretGenerator: - - name: cloud-token - literals: - - token= - options: - annotations: - kubernetes.io/service-account.name: k6-operator-controller - labels: - k6cloud: token -``` - -After updating the file, run `make deploy`. - -After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. - -Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. 
diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md index bffbe9d993..02fc7503a5 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md @@ -5,7 +5,7 @@ title: Schedule k6 tests # Schedule k6 tests -While the k6-operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. +While the k6 Operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. Running these tests requires a little more setup than a standalone test run. From b8b8104fe319c6fdbad943946ac0101cfd8bff6b Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 16:00:18 -0500 Subject: [PATCH 16/23] chore: add uninstall instructions --- .../set-up-distributed-k6/install-k6-operator.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index 82cc822a04..3fa52941f9 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -98,3 +98,17 @@ spec: value: 'some-ns' # ... 
``` + +## Uninstall k6 Operator + +You can remove all of the resources created by the k6 Operator with `bundle`: + +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl delete -f - +``` + +Or with the `make` command: + +```bash +make delete +``` From 1f999adfe08a6f367dd17e620e57ff480bc2c823 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 16:01:05 -0500 Subject: [PATCH 17/23] chore: hide Upgrade k6 Operator page --- .../v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md index cb47c83b3e..2a46ef392e 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -1,6 +1,8 @@ --- weight: 200 title: Upgrade k6 Operator +_build: + list: false --- # Upgrade k6 Operator From c77a6298428f53010f7662bfb7845861577e71d8 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 16:16:09 -0500 Subject: [PATCH 18/23] chore: add Use the k6 operator with Grafana Cloud k6 page --- .../usage/k6-operator-to-gck6.md | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md new file mode 100644 index 0000000000..91f37925d4 --- /dev/null +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md @@ -0,0 +1,73 @@ +--- +weight: 250 +title: Use the k6 Operator with Grafana Cloud k6 +--- + +# Use the k6 Operator with Grafana Cloud k6 + +Grafana Cloud k6 is the Grafana Cloud offering of k6, which gives you 
access to all of k6 capabilities, while Grafana handles the infrastructure, storage, and metrics aggregation and insights from your tests. + +When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric analysis that the platform offers. + +There are two ways to use the k6 Operator with Grafana Cloud k6: Private Load Zones and Cloud output. + +## Before you begin + +To use the k6 Operator with Grafana Cloud k6, you’ll need: + +- A [Grafana Cloud account](https://grafana.com/auth/sign-up/create-user). + +## Private Load Zones + +Private Load Zones (PLZ) are load zones that you can host inside your network by using the k6 Operator. You can start a cloud test in a PLZ by referencing it by name from your script, and the test will run in the nodes of your Kubernetes cluster. + +Refer to [Set up private load zones](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/private-load-zone-v2/) for more details. + +## Cloud output + +With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in the k6 Operator if you have a Grafana Cloud account. + +{{< admonition type="note" >}} + +The cloud output option only supports a `parallelism` value of 20 or less. + +{{< /admonition >}} + +To use this option in k6 Operator, set the argument in YAML: + +```yaml +# ... +script: + configMap: + name: '' +arguments: --out cloud +# ... 
+``` + +Then, if you installed operator with bundle or Helm, create a secret with the following command: + +```bash +kubectl -n k6-operator-system create secret generic my-cloud-token \ + --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" +``` + +Alternatively, if you installed operator with a Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: + +```yaml +# Uncomment this section if you need cloud output and copy-paste your token +secretGenerator: + - name: cloud-token + literals: + - token= + options: + annotations: + kubernetes.io/service-account.name: k6-operator-controller + labels: + k6cloud: token +``` + +After updating the file, run `make deploy`. + +After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. + +Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. From 1d27807dce5a6160d27c38f487cb2f64dc39fbc5 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 17:04:53 -0500 Subject: [PATCH 19/23] chore: review troubleshooting doc --- .../set-up-distributed-k6/troubleshooting.md | 127 +++++++++--------- 1 file changed, 65 insertions(+), 62 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md index 27aef9c6b4..a8bf60698b 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -5,29 +5,25 @@ title: Troubleshooting # Troubleshooting -Just as any Kubernetes application, k6 Operator can get into error scenarios which are sometimes a result of a misconfigured test or setup. 
This document is meant to help troubleshoot such scenarios quicker. +This topic includes instructions to help you troubleshoot common issues with the k6 Operator. ## Common tricks -### Preparation +### Test your script locally -{{% admonition type="warning" %}} - -Before trying to run a script with k6 Operator, be it via `TestRun` or via `PrivateLoadZone`, always run it locally: +Always run your script locally before trying to run it with the k6 Operator: ```bash k6 run script.js ``` -{{% /admonition %}} - -If there are going to be environment variables or CLI options, pass them in as well: +If you're using environment variables or CLI options, pass them in as well: ```bash MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar ``` -This ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally will make it obvious if the configured options are doing what is expected. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. +That ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally can help you check if the configured options are doing what you expect. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. ### `TestRun` deployment @@ -35,18 +31,21 @@ This ensures that the script has correct syntax and can be parsed with k6 in the In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, there are certain repeating patterns: -1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, `n` runners. -1. If any of these Jobs did not result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: +1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, and `n` runners. +1. 
If any of these Jobs didn't result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: + ```bash kubectl get jobs -A kubectl describe job mytest-initializer ``` -1. If one of the Pods was deployed but finished with `Error`, it makes sense to check its logs: + +1. If one of the Pods was deployed but finished with `Error`, you can check its logs with the following command: + ```bash kubectl logs mytest-initializer-xxxxx ``` -If the Pods seem to be working but not producing an expected result and there's not enough information in the logs of the Pods, it might make sense to turn on k6 [verbose option](https://grafana.com/docs/k6/latest/using-k6/k6-options/#options) in `TestRun` spec: +If the Pods seem to be working but not producing an expected result and there's not enough information in the logs, you can use the k6 [verbose option](https://grafana.com/docs/k6//using-k6/k6-options/#options) in the `TestRun` spec: ```yaml apiVersion: k6.io/v1alpha1 @@ -64,7 +63,7 @@ spec: #### k6 Operator -Another source of info is k6 Operator itself. It is deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from [previous subsection](#the-pods) usually contain enough information to glean correct diagnosis. With the standard deployment, the logs of k6 Operator can be checked with: +Another source of info is the k6 Operator itself. It's deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from the previous section usually contain enough information to help you diagnose any issues. 
With the standard deployment, the logs of the k6 Operator can be checked with: ```bash kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx @@ -72,7 +71,7 @@ kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxx #### Inspect `TestRun` resource -One `TestRun` CR is deployed, it can be inspected the same way as any other resource: +After you deploy a `TestRun` CR, you can inspect it the same way as any other resource: ```bash kubectl describe testrun my-testrun @@ -106,35 +105,35 @@ Status: Stage: error ``` -If `Stage` is equal to `error` then it definitely makes sense to check the logs of k6 Operator. +If `Stage` is equal to `error`, you can check the logs of k6 Operator. -Conditions can be used as a source of info as well, but it is a more advanced troubleshooting option that should be used if previous suggestions are insufficient. Note, that conditions that start with `Cloud` prefix matter only in the setting of k6 Cloud test runs, i.e. cloud output and PLZ test runs. +Conditions can be used as a source of info as well, but it's a more advanced troubleshooting option that should be used if the previous steps weren't enough to diagnose the issue. Note that conditions that start with the `Cloud` prefix only matter in the setting of k6 Cloud test runs, for example, for cloud output and PLZ test runs. ### `PrivateLoadZone` deployment -If `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. +If the `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. 
-Firstly, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. +First, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. -Unlike `TestRun` deployment, when `PrivateLoadZone` is first created, there are no additional resources deployed. So the only source for troubleshooting are the logs of k6 Operator. See the [above subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to what is wrong. See [below](#privateloadzone-subscription-error) for some potential errors explained in more detail. +Unlike `TestRun` deployment, when a `PrivateLoadZone` is first created, there are no additional resources deployed. So, the only source for troubleshooting are the logs of k6 Operator. See the [previous subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to diagnose the issue. Refer to [PrivateLoadZone: subscription error](#privateloadzone-subscription-error) for more details. ### Running tests in `PrivateLoadZone` -Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If such test is misbehaving (errors out, does not produce expected result, etc.), then one should check: +Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by the k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If the test is misbehaving, for example, it errors out, or doesn't produce the expected result, then you can check: -1. if there are any messages in GCk6 UI -2. 
if there are any messages in the output of `k6 cloud` command -3. the resources and their logs, the same way as with [standalone `TestRun` deployment](#testrun-deployment) +1. If there are any messages in the GCk6 UI. +2. If there are any messages in the output of the `k6 cloud` command. +3. The resources and their logs, the same way as with a [standalone `TestRun` deployment](#testrun-deployment) ## Common scenarios -### Where are my env vars... +### Issues with environment variables -Some tricky cases with environment variables are described in [this doc](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md). +Refer to [Environment variables](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md) for details on how to pass environment variables to the k6 Operator. -### Tags are not working?! +### Tags not working -Currently, tags are a rather common source of frustration in usage of k6 Operator. For example: +Tags are a rather common source of errors when using the k6 Operator. For example, the following tags would lead to parsing errors: ```yaml arguments: --tag product_id="Test A" @@ -142,33 +141,33 @@ Currently, tags are a rather common source of frustration in usage of k6 Operato arguments: --tag foo=\"bar\" ``` -Passing the above leads to parsing errors which can be seen in the logs of either initializer or runner Pod, e.g.: +You can see those errors in the logs of either the initializer or the runner Pod, for example: ```bash time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" ``` -This is a standard problem with escaping the characters, and there's even an [issue](https://github.com/grafana/k6-operator/issues/211) that can be upvoted. +This is a common problem with escaping the characters. You can find an [issue](https://github.com/grafana/k6-operator/issues/211) in the k6 Operator repository that can be upvoted. 
### Initializer logs an error but it's not about tags -Often, this happens because of lack of attention to the [preparation](#preparation) step. One more command that can be tried here is to run the following: +This can happen because of lack of attention to the [Test your script locally](#test-your-script-locally) step. One command that you can use to help diagnose issues with your script is the following: ```bash k6 inspect --execution-requirements script.js ``` -This command is a shortened version of what initializer Pod is executing. If the above command produces an error, it is definitely a problem with the script and should be first solved outside of k6 Operator. The error itself may contain a hint to what is wrong, for instance a syntax error. +That command is a shortened version of what the initializer Pod is executing. If the command produces an error, there's a problem with the script itself and it should be solved outside of the k6 Operator. The error itself may contain a hint to what's wrong, such as a syntax error. -If standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. Recommendations here: +If the standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. A couple of recommendations here are: -- read carefully the output in initializer Pod: is it logged by k6 process or by something else? - - :information_source: k6 Operator expects initializer logs to contain only the output of `k6 inspect`. If there's any other log line present, then k6 Operator will fail to parse it and the test will not start. ([issue](https://github.com/grafana/k6-operator/issues/193)) -- check events in initializer Job and Pod as they may contain another hint about what is wrong +- Review the output of the initializer Pod: is it logged by the k6 process or by something else? 
+ - :information_source: k6 Operator expects the initializer logs to contain only the output of `k6 inspect`. If there are any other log lines present, then the k6 Operator will fail to parse it and the test won't start. Refer to this [issue](https://github.com/grafana/k6-operator/issues/193) for more details. +- Check events in the initializer Job and Pod as they may contain another hint about what's wrong. ### Non-existent ServiceAccount -ServiceAccount can be defined as `serviceAccountName` and `runner.serviceAccountName` in PrivateLoadZone and TestRun CRD respectfully. If the specified ServiceAccount does not exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and k6 Operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job: +A ServiceAccount can be defined as `serviceAccountName` in a PrivateLoadZone, and as `runner.serviceAccountName` in a TestRun CRD. If the specified ServiceAccount doesn't exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and the k6 Operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job: ```bash kubectl describe job plz-test-xxxxxx-initializer @@ -177,15 +176,17 @@ Events: Warning FailedCreate 57s (x4 over 2m7s) job-controller Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found ``` -Currently, k6 Operator does not try to analyze such scenarios on its own but we have an [issue](https://github.com/grafana/k6-operator/issues/260) for improvement. +k6 Operator doesn't try to analyze such scenarios on its own, but you can refer to the following [issue](https://github.com/grafana/k6-operator/issues/260) for improvements. 
+ +#### How to fix -How to fix: incorrect `serviceAccountName` must be corrected and TestRun or PrivateLoadZone resource must be re-deployed. +To fix this issue, the incorrect `serviceAccountName` must be corrected, and the TestRun or PrivateLoadZone resource must be re-deployed. ### Non-existent `nodeSelector` -`nodeSelector` can be defined as `nodeSelector` and `runner.nodeSelector` in PrivateLoadZone and TestRun CRD respectfully. +`nodeSelector` can be defined as `nodeSelector` in a PrivateLoadZone, and as `runner.nodeSelector` in the TestRun CRD. -This case is very similar to [ServiceAccount one](#non-existent-serviceaccount): the Pod creation will fail, only the error would be somewhat different: +This case is very similar to the [ServiceAccount](#non-existent-serviceaccount): the Pod creation will fail, but the error is slightly different: ```bash kubectl describe pod plz-test-xxxxxx-initializer-xxxxx @@ -194,13 +195,15 @@ Events: Warning FailedScheduling 48s (x5 over 4m6s) default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. ``` -How to fix: incorrect `nodeSelector` must be corrected and TestRun or PrivateLoadZone resource must be re-deployed. +#### How to fix + +To fix this issue, the incorrect `nodeSelector` must be corrected and the TestRun or PrivateLoadZone resource must be re-deployed. ### Insufficient resources -A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There is a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. 
Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on maximum number of nodes and cannot provide the required resources on time or at all. +A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There's a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on the maximum number of nodes, and can't provide the required resources on time or at all. -This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it is possible to fix the issue with insufficient resources on-the-fly, e.g. by adding more nodes, k6 Operator will attempt to continue executing a test run. +This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it's possible to fix the issue with insufficient resources on-the-fly, for example, by adding more nodes, k6 Operator will attempt to continue executing a test run. ### OOM of a runner Pod @@ -212,37 +215,37 @@ kubectl -f my-test.yaml delete kubectl delete testrun my-test ``` -In case of OOM, it makes sense to review k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set `spec.runner.resources` in TestRun CRD or `spec.resources` in PrivateLoadZone CRD accordingly. 
+In case of OOM, it makes sense to review the k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set the `spec.runner.resources` in the TestRun CRD, or `spec.resources` in the PrivateLoadZone CRD accordingly. ### PrivateLoadZone: subscription error -If there's something off with your k6 Cloud subscription, there will be a 400 error in the logs with the message detailing the problem. For example: +If there's an issue with your Grafana Cloud k6 subscription, there will be a 400 error in the logs with the message detailing the problem. For example: ```bash "Received error `(400) You have reached the maximum Number of private load zones your organization is allowed to have. Please contact support if you want to create more.`. Message from server ``" ``` -The most likely course of action in this case is either to check your organization settings in GCk6 or to contact k6 Cloud support. +To fix this issue, check your organization settings in Grafana Cloud k6 or contact Support. -### PrivateLoadZone: wrong token +### PrivateLoadZone: Wrong token -There can be two major problems with the token. +There can be two major problems with the authentication token: -1. If token was not created or was created in a wrong location, there will be the following in the logs: +1. 
If the token wasn't created, or was created in a wrong location, the logs will show the following error: -```bash -Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} -``` + ```bash + Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} + ``` -2. If token contains a corrupted value or it's not an organizational token, there will be the following error in the logs: +2. If the token contains a corrupted value, or it's not an organizational token, the logs will show the following error: -```bash -"Received error `(403) Authentication token incorrect or expired`. Message from server ``" -``` + ```bash + "Received error `(403) Authentication token incorrect or expired`. Message from server ``" + ``` -### PrivateLoadZone: networking setup +### PrivateLoadZone: Networking setup -If you see any dial or connection errors in the logs of k6 Operator, it makes sense to double-check the networking setup. For PrivateLoadZone to operate, outbound traffic to k6 Cloud [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). The basic way to check the reachability of k6 Cloud endpoints: +If you see any dial or connection errors in the logs of the k6 Operator, it makes sense to double-check the networking setup. For a PrivateLoadZone to operate, outbound traffic to Grafana Cloud k6 [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). 
To check the reachability of Grafana Cloud k6 endpoints: ```bash kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml kubectl exec -it dnsutils -- nslookup ingest.k6.io kubectl exec -it dnsutils -- nslookup api.k6.io ``` -For more resources on troubleshooting networking, see Kubernetes [official docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). +For more resources on troubleshooting networking, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). -### PrivateLoadZone: insufficient resources +### PrivateLoadZone: Insufficient resources -The problem is similar to [insufficient resources in general case](#insufficient-resources). But when running a PrivateLoadZone test, k6 Operator will wait only for a timeout period (10 minutes at the moment). When the timeout period is up, the test will be aborted by k6 Cloud and marked as such both in PrivateLoadZone and in GCk6. In other words, there is a time limit to fix this issue without restarting the test run. +The PrivateLoadZone insufficient resources problem is similar to the [insufficient resources issue](#insufficient-resources). But, when running a PrivateLoadZone test, the k6 Operator will wait only for a timeout period. When the timeout period is up, the test will be aborted by Grafana Cloud k6 and marked as such, both in the PrivateLoadZone and in Grafana Cloud k6. In other words, there is a time limit to fix this issue without restarting the test run. 
From bc802924c774027f43dad0c686155b2676485fa3 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Mon, 1 Jul 2024 17:09:43 -0500 Subject: [PATCH 20/23] chore: update Namespaced deployment heading to Watch namespace --- .../set-up/set-up-distributed-k6/install-k6-operator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md index 3fa52941f9..315ec003ca 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -77,9 +77,9 @@ The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in th {{< /admonition >}} -## Namespaced deployment +## Watch namespace -By default, the k6 Operator watches `TestRun` and `PrivateLoadZone` custom resources in all namespaces. You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: +By default, the k6 Operator watches the `TestRun` and `PrivateLoadZone` custom resources in all namespaces. 
You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: ```yaml apiVersion: apps/v1 From 3f08b761df99524f166dc33024eb82b9e4376687 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Fri, 19 Jul 2024 15:11:42 -0500 Subject: [PATCH 21/23] Apply suggestions from code review Co-authored-by: Olha Yevtushenko --- .../set-up/set-up-distributed-k6/usage/common-options.md | 7 +++++-- .../usage/executing-k6-scripts-with-testrun-crd.md | 2 +- .../set-up/set-up-distributed-k6/usage/extensions.md | 2 +- .../set-up-distributed-k6/usage/k6-operator-to-gck6.md | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md index fe92f5c9da..43d95d8625 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md @@ -15,8 +15,7 @@ The only options that are required as part of the `TestRun` CRD spec are `script ## Separate -`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a -test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. +`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. ## Service account @@ -52,3 +51,7 @@ Defines options for the starter pod. The non-exhaustive list includes: - Passing in a custom image. - Passing in labels and annotations. 
+ +## Initializer + +By default, the initializer Job is defined with the same options as the runner Jobs, but its options can be overwritten by setting `.spec.initializer`. diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md index d6bbcb7523..64a7c626cb 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -5,7 +5,7 @@ title: Run k6 scripts with TestRun CRD # Run k6 scripts with TestRun CRD -This guide covers how you can configure your k6 scripts to run using the k6 operator. +This guide covers how you can configure your k6 scripts to run using the k6 Operator. ## Defining test scripts diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md index 8abaabc920..e654032755 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -5,7 +5,7 @@ title: Use k6 Operator with k6 extensions # Use k6 Operator with k6 extensions -By default, the k6 operator uses `grafana/k6:latest`, or the latest version of k6, as the container image for the test jobs. +By default, the k6 Operator uses `ghcr.io/grafana/k6-operator:latest-runner` as the container image for the test jobs. If you want to use k6 [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6), you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. 
diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md index 91f37925d4..600decab73 100644 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md +++ b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md @@ -7,7 +7,7 @@ title: Use the k6 Operator with Grafana Cloud k6 Grafana Cloud k6 is the Grafana Cloud offering of k6, which gives you access to all of k6 capabilities, while Grafana handles the infrastructure, storage, and metrics aggregation and insights from your tests. -When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric analysis that the platform offers. +When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric storage and analysis that the platform offers. There are two ways to use the k6 Operator with Grafana Cloud k6: Private Load Zones and Cloud output. 
From ac060ef17d634f65dc67999cfce2a14852ed0a01 Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Fri, 19 Jul 2024 15:13:54 -0500 Subject: [PATCH 22/23] Move docs to next and v0.52.x folders --- .../set-up/set-up-distributed-k6/_index.md | 19 ++ .../install-k6-operator.md | 114 ++++++++ .../set-up-distributed-k6/troubleshooting.md | 260 ++++++++++++++++++ .../upgrade-k6-operator.md | 10 + .../set-up-distributed-k6/usage/_index.md | 10 + .../usage/common-options.md | 57 ++++ .../executing-k6-scripts-with-testrun-crd.md | 219 +++++++++++++++ .../set-up-distributed-k6/usage/extensions.md | 61 ++++ .../usage/k6-operator-to-gck6.md | 73 +++++ .../set-up-distributed-k6/usage/reference.md | 12 + .../usage/scheduling-tests.md | 106 +++++++ .../set-up/set-up-distributed-k6/_index.md | 19 ++ .../install-k6-operator.md | 114 ++++++++ .../set-up-distributed-k6/troubleshooting.md | 260 ++++++++++++++++++ .../upgrade-k6-operator.md | 10 + .../set-up-distributed-k6/usage/_index.md | 10 + .../usage/common-options.md | 57 ++++ .../executing-k6-scripts-with-testrun-crd.md | 219 +++++++++++++++ .../set-up-distributed-k6/usage/extensions.md | 61 ++++ .../usage/k6-operator-to-gck6.md | 73 +++++ .../set-up-distributed-k6/usage/reference.md | 12 + .../usage/scheduling-tests.md | 106 +++++++ 22 files changed, 1882 insertions(+) create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/_index.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/install-k6-operator.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/troubleshooting.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/upgrade-k6-operator.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/_index.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/common-options.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md create mode 100644 
docs/sources/next/set-up/set-up-distributed-k6/usage/extensions.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/reference.md create mode 100644 docs/sources/next/set-up/set-up-distributed-k6/usage/scheduling-tests.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/_index.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/install-k6-operator.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/troubleshooting.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/_index.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/common-options.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/extensions.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/reference.md create mode 100644 docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md diff --git a/docs/sources/next/set-up/set-up-distributed-k6/_index.md b/docs/sources/next/set-up/set-up-distributed-k6/_index.md new file mode 100644 index 0000000000..06687090b0 --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/_index.md @@ -0,0 +1,19 @@ +--- +weight: 150 +title: Set up distributed k6 +--- + +# Set up distributed k6 + +It's possible to run large load tests even when using a single node, or single machine. But, depending on your use case, you might also want to run a distributed Grafana k6 test in your own infrastructure. 
+ +A couple of reasons why you might want to do this: + +- You run your application in Kubernetes and would like k6 to be executed in the same fashion as all your other infrastructure components. +- You want to run your tests within your private network for security and/or privacy reasons. + +[k6 Operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. + +This section includes the following topics: + +{{< section depth=2 >}} diff --git a/docs/sources/next/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/next/set-up/set-up-distributed-k6/install-k6-operator.md new file mode 100644 index 0000000000..315ec003ca --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/install-k6-operator.md @@ -0,0 +1,114 @@ +--- +weight: 100 +title: Install k6 Operator +--- + +# Install k6 Operator + +This guide provides step-by-step instructions on how to install k6 Operator. + +## Before you begin + +To install k6 Operator, you'll need: + +- A Kubernetes cluster, along with access to it. +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +## Deploy the operator + +There are three different options that you can use to deploy the k6 Operator. + +### Deploy with bundle + +The easiest way to install the operator is with bundle: + +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - +``` + +Bundle includes default manifests for k6 Operator, including a `k6-operator-system` namespace and k6 Operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. + +### Deploy with Helm + +Helm releases of k6 Operator are published together with other Grafana Helm charts. 
You can install it with the following commands: + +```bash +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +helm install k6-operator grafana/k6-operator +``` + +You can also pass additional configuration options with a `values.yaml` file: + +```bash +helm install k6-operator grafana/k6-operator -f values.yaml +``` + +Refer to the [k6 Operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. + +You can find a complete list of Helm options in the [k6 Operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). + +### Deploy with Makefile + +In order to install the operator with a Makefile, you'll need: + +- [go](https://go.dev/doc/install) +- [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) + +A more manual, low-level way to install the k6 operator is by running the command below: + +```bash +make deploy +``` + +This method may be more useful for development of the k6 Operator, depending on specifics of the setup. + +## Install the CRD + +The k6 Operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: + +```bash +make install +``` + +{{< admonition type="warning" >}} + +The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in the future. We recommend using the `TestRun` CRD. + +{{< /admonition >}} + +## Watch namespace + +By default, the k6 Operator watches the `TestRun` and `PrivateLoadZone` custom resources in all namespaces. 
You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-operator-controller-manager + namespace: k6-operator-system +spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/grafana/k6-operator:controller-v0.0.14 + env: + - name: WATCH_NAMESPACE + value: 'some-ns' +# ... +``` + +## Uninstall k6 Operator + +You can remove all of the resources created by the k6 Operator with `bundle`: + +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl delete -f - +``` + +Or with the `make` command: + +```bash +make delete +``` diff --git a/docs/sources/next/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/next/set-up/set-up-distributed-k6/troubleshooting.md new file mode 100644 index 0000000000..a8bf60698b --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/troubleshooting.md @@ -0,0 +1,260 @@ +--- +weight: 400 +title: Troubleshooting +--- + +# Troubleshooting + +This topic includes instructions to help you troubleshoot common issues with the k6 Operator. + +## Common tricks + +### Test your script locally + +Always run your script locally before trying to run it with the k6 Operator: + +```bash +k6 run script.js +``` + +If you're using environment variables or CLI options, pass them in as well: + +```bash +MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar +``` + +That ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally can help you check if the configured options are doing what you expect. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. 
+ +### `TestRun` deployment + +#### The pods + +In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, there are certain repeating patterns: + +1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, and `n` runners. +1. If any of these Jobs didn't result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: + + ```bash + kubectl get jobs -A + kubectl describe job mytest-initializer + ``` + +1. If one of the Pods was deployed but finished with `Error`, you can check its logs with the following command: + + ```bash + kubectl logs mytest-initializer-xxxxx + ``` + +If the Pods seem to be working but not producing an expected result and there's not enough information in the logs, you can use the k6 [verbose option](https://grafana.com/docs/k6//using-k6/k6-options/#options) in the `TestRun` spec: + +```yaml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample +spec: + parallelism: 2 + script: + configMap: + name: 'test' + file: 'test.js' + arguments: --verbose +``` + +#### k6 Operator + +Another source of info is the k6 Operator itself. It's deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from the previous section usually contain enough information to help you diagnose any issues. With the standard deployment, the logs of the k6 Operator can be checked with: + +```bash +kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx +``` + +#### Inspect `TestRun` resource + +After you deploy a `TestRun` CR, you can inspect it the same way as any other resource: + +```bash +kubectl describe testrun my-testrun +``` + +Firstly, check if the spec is as expected. 
Then, see the current status: + +```yaml +Status: + Conditions: + Last Transition Time: 2024-01-17T10:30:01Z + Message: + Reason: CloudTestRunFalse + Status: False + Type: CloudTestRun + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: TestRunPreparation + Status: Unknown + Type: TestRunRunning + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudTestRunAbortedFalse + Status: False + Type: CloudTestRunAborted + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudPLZTestRunFalse + Status: False + Type: CloudPLZTestRun + Stage: error +``` + +If `Stage` is equal to `error`, you can check the logs of k6 Operator. + +Conditions can be used as a source of info as well, but it's a more advanced troubleshooting option that should be used if the previous steps weren't enough to diagnose the issue. Note that conditions that start with the `Cloud` prefix only matter in the setting of k6 Cloud test runs, for example, for cloud output and PLZ test runs. + +### `PrivateLoadZone` deployment + +If the `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. + +First, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. + +Unlike `TestRun` deployment, when a `PrivateLoadZone` is first created, there are no additional resources deployed. So, the only source for troubleshooting are the logs of k6 Operator. See the [previous subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to diagnose the issue. Refer to [PrivateLoadZone: subscription error](#privateloadzone-subscription-error) for more details. 
+ +### Running tests in `PrivateLoadZone` + +Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by the k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If the test is misbehaving, for example, it errors out, or doesn't produce the expected result, then you can check: + +1. If there are any messages in the GCk6 UI. +2. If there are any messages in the output of the `k6 cloud` command. +3. The resources and their logs, the same way as with a [standalone `TestRun` deployment](#testrun-deployment). + +## Common scenarios + +### Issues with environment variables + +Refer to [Environment variables](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md) for details on how to pass environment variables to the k6 Operator. + +### Tags not working + +Tags are a rather common source of errors when using the k6 Operator. For example, the following tags would lead to parsing errors: + +```yaml + arguments: --tag product_id="Test A" + # or + arguments: --tag foo=\"bar\" +``` + +You can see those errors in the logs of either the initializer or the runner Pod, for example: + +```bash +time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" +``` + +This is a common problem with escaping the characters. You can find an [issue](https://github.com/grafana/k6-operator/issues/211) in the k6 Operator repository that can be upvoted. + +### Initializer logs an error but it's not about tags + +This can happen if you skip the [Test your script locally](#test-your-script-locally) step. One command that you can use to help diagnose issues with your script is the following: + +```bash +k6 inspect --execution-requirements script.js +``` + +That command is a shortened version of what the initializer Pod is executing. 
If the command produces an error, there's a problem with the script itself and it should be solved outside of the k6 Operator. The error itself may contain a hint to what's wrong, such as a syntax error. + +If the standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. A couple of recommendations here are: + +- Review the output of the initializer Pod: is it logged by the k6 process or by something else? + - :information_source: k6 Operator expects the initializer logs to contain only the output of `k6 inspect`. If there are any other log lines present, then the k6 Operator will fail to parse it and the test won't start. Refer to this [issue](https://github.com/grafana/k6-operator/issues/193) for more details. +- Check events in the initializer Job and Pod as they may contain another hint about what's wrong. + +### Non-existent ServiceAccount + +A ServiceAccount can be defined as `serviceAccountName` in a PrivateLoadZone, and as `runner.serviceAccountName` in a TestRun CRD. If the specified ServiceAccount doesn't exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and the k6 Operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job: + +```bash +kubectl describe job plz-test-xxxxxx-initializer +... +Events: + Warning FailedCreate 57s (x4 over 2m7s) job-controller Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found +``` + +k6 Operator doesn't try to analyze such scenarios on its own, but you can refer to the following [issue](https://github.com/grafana/k6-operator/issues/260) for improvements. + +#### How to fix + +To fix this issue, the incorrect `serviceAccountName` must be corrected, and the TestRun or PrivateLoadZone resource must be re-deployed. 
+ +### Non-existent `nodeSelector` + +`nodeSelector` can be defined as `nodeSelector` in a PrivateLoadZone, and as `runner.nodeSelector` in the TestRun CRD. + +This case is very similar to the [ServiceAccount](#non-existent-serviceaccount): the Pod creation will fail, but the error is slightly different: + +```bash +kubectl describe pod plz-test-xxxxxx-initializer-xxxxx +... +Events: + Warning FailedScheduling 48s (x5 over 4m6s) default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. +``` + +#### How to fix + +To fix this issue, the incorrect `nodeSelector` must be corrected and the TestRun or PrivateLoadZone resource must be re-deployed. + +### Insufficient resources + +A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There's a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on the maximum number of nodes, and can't provide the required resources on time or at all. + +This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it's possible to fix the issue with insufficient resources on-the-fly, for example, by adding more nodes, k6 Operator will attempt to continue executing a test run. 
+ +### OOM of a runner Pod + +If there's at least one runner Pod that OOM-ed, the whole test will be [stuck](https://github.com/grafana/k6-operator/issues/251) and will have to be deleted manually: + +```bash +kubectl -f my-test.yaml delete +# or +kubectl delete testrun my-test +``` + +In case of OOM, it makes sense to review the k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set the `spec.runner.resources` in the TestRun CRD, or `spec.resources` in the PrivateLoadZone CRD accordingly. + +### PrivateLoadZone: subscription error + +If there's an issue with your Grafana Cloud k6 subscription, there will be a 400 error in the logs with the message detailing the problem. For example: + +```bash +"Received error `(400) You have reached the maximum Number of private load zones your organization is allowed to have. Please contact support if you want to create more.`. Message from server ``" +``` + +To fix this issue, check your organization settings in Grafana Cloud k6 or contact Support. + +### PrivateLoadZone: Wrong token + +There can be two major problems with the authentication token: + +1. If the token wasn't created, or was created in a wrong location, the logs will show the following error: + + ```bash + Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} + ``` + +2. If the token contains a corrupted value, or it's not an organizational token, the logs will show the following error: + + ```bash + "Received error `(403) Authentication token incorrect or expired`. Message from server ``" + ``` + +### PrivateLoadZone: Networking setup + +If you see any dial or connection errors in the logs of the k6 Operator, it makes sense to double-check the networking setup. 
For a PrivateLoadZone to operate, outbound traffic to Grafana Cloud k6 [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). To check the reachability of Grafana Cloud k6 endpoints: + +```bash +kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml +kubectl exec -it dnsutils -- nslookup ingest.k6.io +kubectl exec -it dnsutils -- nslookup api.k6.io +``` + +For more resources on troubleshooting networking, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). + +### PrivateLoadZone: Insufficient resources + +The PrivateLoadZone insufficient resources problem is similar to the [insufficient resources issue](#insufficient-resources). But when running a PrivateLoadZone test, the k6 Operator will wait only for a timeout period. When the timeout period is up, the test will be aborted by Grafana Cloud k6 and marked as such, both in the PrivateLoadZone and in Grafana Cloud k6. In other words, there is a time limit to fix this issue without restarting the test run. 
diff --git a/docs/sources/next/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/next/set-up/set-up-distributed-k6/upgrade-k6-operator.md new file mode 100644 index 0000000000..2a46ef392e --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -0,0 +1,10 @@ +--- +weight: 200 +title: Upgrade k6 Operator +_build: + list: false +--- + +# Upgrade k6 Operator + + diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/_index.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/_index.md new file mode 100644 index 0000000000..48ddb3b67c --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/_index.md @@ -0,0 +1,10 @@ +--- +weight: 300 +title: Usage +--- + +# Usage + +This section includes the following topics: + +{{< section depth=2 >}} diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/common-options.md new file mode 100644 index 0000000000..43d95d8625 --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/common-options.md @@ -0,0 +1,57 @@ +--- +weight: 300 +title: Common options +--- + +# Common options + + + +The only options that are required as part of the `TestRun` CRD spec are `script` and `parallelism`. This guide covers some of the most common options. + +## Parallelism + +`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6 Operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. + +## Separate + +`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. 
+ +## Service account + +If you want to use a custom Service Account you'll need to pass it into both the starter and the runner object: + +```yaml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: +spec: + script: + configMap: + name: '' + runner: + serviceAccountName: + starter: + serviceAccountName: +``` + +## Runner + +Defines options for the test runner pods. The non-exhaustive list includes: + +- Passing resource limits and requests. +- Passing in labels and annotations. +- Passing in affinity and anti-affinity. +- Passing in a custom image. + +## Starter + +Defines options for the starter pod. The non-exhaustive list includes: + +- Passing in a custom image. +- Passing in labels and annotations. + +## Initializer + +By default, the initializer Job is defined with the same options as the runner Jobs, but its options can be overwritten by setting `.spec.initializer`. diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md new file mode 100644 index 0000000000..64a7c626cb --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -0,0 +1,219 @@ +--- +weight: 100 +title: Run k6 scripts with TestRun CRD +--- + +# Run k6 scripts with TestRun CRD + +This guide covers how you can configure your k6 scripts to run using the k6 Operator. + +## Defining test scripts + +There are several ways to configure scripts in the `TestRun` CRD. The operator uses `configMap`, `volumeClaim` and `localFile` to serve test scripts to the jobs. 
+ +### ConfigMap + +The main way to configure a script is to create a `ConfigMap` with the script contents: + +```bash +kubectl create configmap my-test --from-file /path/to/my/test.js +``` + +Then specify it in `TestRun`: + +```bash + script: + configMap: + name: my-test + file: test.js +``` + +{{< admonition type="note" >}} + +A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `localFile` instead. + +{{< /admonition >}} + +### VolumeClaim + +If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting files, you can pass it to the test like this: + +```yaml +spec: + script: + volumeClaim: + name: 'stress-test-volumeClaim' + # test.js should exist inside /test/ folder. + # All the js files and directories test.js is importing + # should be inside the same directory as well. + file: 'test.js' +``` + +The pods will expect to find the script files in the `/test/` folder. If `volumeClaim` fails, that's the first place to check. The latest initializer pod doesn't generate any logs and when it can't find the file, it exits with an error. Refer to [this GitHub issue](https://github.com/grafana/k6-operator/issues/143) for potential improvements. 
+ +#### Sample directory structure + +``` +├── test +│ ├── requests +│ │ ├── stress-test.js +│ ├── test.js +``` + +In the preceding example, `test.js` imports a function from `stress-test.js` and these files would look like this: + +```js +// test.js +import stressTest from './requests/stress-test.js'; + +export const options = { + vus: 50, + duration: '10s', +}; + +export default function () { + stressTest(); +} +``` + +```js +// stress-test.js +import { sleep, check } from 'k6'; +import http from 'k6/http'; + +export default () => { + const res = http.get('https://test-api.k6.io'); + check(res, { + 'status is 200': () => res.status === 200, + }); + sleep(1); +}; +``` + +### LocalFile + +If the script is present in the filesystem of a custom runner image, it can be accessed with the `localFile` option: + +```yaml +spec: + parallelism: 4 + script: + localFile: /test/test.js + runner: + image: +``` + +{{< admonition type="note" >}} + +If there is any limitation on the usage of `volumeClaim` in your cluster, you can use the `localFile` option. We recommend using `volumeClaim` if possible. + +{{< /admonition >}} + +### Multi-file tests + +In case your k6 script is split between multiple JavaScript files, you can create a `ConfigMap` with several data entries like this: + +```bash +kubectl create configmap scenarios-test --from-file test.js --from-file utils.js +``` + +If there are too many files to specify manually, using `kubectl` with a folder might be an option as well: + +```bash +kubectl create configmap scenarios-test --from-file=./test +``` + +Alternatively, you can create an archive with k6: + +```bash +k6 archive test.js [args] +``` + +The `k6 archive` command creates an `archive.tar` in your current folder. 
You can then use that file in the `configmap`, similarly to a JavaScript script: + +```bash +kubectl create configmap scenarios-test --from-file=archive.tar +``` + +If you use an archive, you must edit your YAML file for the `TestRun` deployment so that the `file` option is set to the correct entrypoint for the `k6 run` command: + +```yaml +# ... +spec: + script: + configMap: + name: 'crocodile-stress-test' + file: 'archive.tar' # <-- change here +``` + +## Run tests + +Tests are executed by applying the custom resource `TestRun` to a cluster where the k6 Operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: + +```yaml +# k6-resource.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample +spec: + parallelism: 4 + script: + configMap: + name: k6-test + file: test.js + separate: false + runner: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + resources: + limits: + cpu: 200m + memory: 1000Mi + requests: + cpu: 100m + memory: 500Mi + starter: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 2000 + runAsGroup: 2000 + runAsNonRoot: true +``` + +A `TestRun` CR is created with this command: + +```bash +kubectl apply -f /path/to/your/k6-resource.yml +``` + +## Clean up resources + +After completing a test run, you need to clean up the test jobs that were created: + +```bash +kubectl delete -f /path/to/your/k6-resource.yml +``` + +Alternatively, you can configure the automatic deletion of all resources with the `cleanup` option: + +```yaml +spec: + cleanup: 'post' +``` + +With the `cleanup` option set, k6 Operator removes the `TestRun` CRD and all created resources once the test run ends. 
diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/extensions.md new file mode 100644 index 0000000000..e654032755 --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/extensions.md @@ -0,0 +1,61 @@ +--- +weight: 200 +title: Use k6 Operator with k6 extensions +--- + +# Use k6 Operator with k6 extensions + +By default, the k6 Operator uses `ghcr.io/grafana/k6-operator:latest-runner` as the container image for the test jobs. + +If you want to use k6 [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6), you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. + +For example, this is a `Dockerfile` that builds a k6 binary with the `xk6-output-influxdb` extension: + +```Dockerfile +# Build the k6 binary with the extension +FROM golang:1.20 as builder + +RUN go install go.k6.io/xk6/cmd/xk6@latest + +# For our example, we'll add support for output of test metrics to InfluxDB v2. +# Feel free to add other extensions using the '--with ...'. +RUN xk6 build \ + --with github.com/grafana/xk6-output-influxdb@latest \ + --output /k6 + +# Use the operator's base image and override the k6 binary +FROM grafana/k6:latest +COPY --from=builder /k6 /usr/bin/k6 +``` + +You can build the image based on this `Dockerfile` by executing: + +```bash +docker build -t k6-extended:local . +``` + +After the build completes, you can push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. 
+ +You can then use that image as follows: + +```yaml +# k6-resource-with-extensions.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample-with-extensions +spec: + parallelism: 4 + script: + configMap: + name: my-stress-test + file: test.js + runner: + image: k6-extended:local + env: + - name: K6_OUT + value: xk6-influxdb=http://influxdb.somewhere:8086/demo +``` + +Note that this examples overrides the default image with `k6-extended:latest`, and it includes environment variables that are required by the `xk6-output-influxdb` extension. diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md new file mode 100644 index 0000000000..600decab73 --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md @@ -0,0 +1,73 @@ +--- +weight: 250 +title: Use the k6 Operator with Grafana Cloud k6 +--- + +# Use the k6 Operator with Grafana Cloud k6 + +Grafana Cloud k6 is the Grafana Cloud offering of k6, which gives you access to all of k6 capabilities, while Grafana handles the infrastructure, storage, and metrics aggregation and insights from your tests. + +When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric storage and analysis that the platform offers. + +There are two ways to use the k6 Operator with Grafana Cloud k6: Private Load Zones and Cloud output. + +## Before you begin + +To use the k6 Operator with Grafana Cloud k6, you’ll need: + +- A [Grafana Cloud account](https://grafana.com/auth/sign-up/create-user). + +## Private Load Zones + +Private Load Zones (PLZ) are load zones that you can host inside your network by using the k6 Operator. You can start a cloud test in a PLZ by referencing it by name from your script, and the test will run in the nodes of your Kubernetes cluster. 
+ +Refer to [Set up private load zones](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/private-load-zone-v2/) for more details. + +## Cloud output + +With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in the k6 Operator if you have a Grafana Cloud account. + +{{< admonition type="note" >}} + +The cloud output option only supports a `parallelism` value of 20 or less. + +{{< /admonition >}} + +To use this option in k6 Operator, set the argument in YAML: + +```yaml +# ... +script: + configMap: + name: '' +arguments: --out cloud +# ... +``` + +Then, if you installed operator with bundle or Helm, create a secret with the following command: + +```bash +kubectl -n k6-operator-system create secret generic my-cloud-token \ + --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" +``` + +Alternatively, if you installed operator with a Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: + +```yaml +# Uncomment this section if you need cloud output and copy-paste your token +secretGenerator: + - name: cloud-token + literals: + - token= + options: + annotations: + kubernetes.io/service-account.name: k6-operator-controller + labels: + k6cloud: token +``` + +After updating the file, run `make deploy`. + +After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. + +Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. 
diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/reference.md new file mode 100644 index 0000000000..f6f8b6d06f --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/reference.md @@ -0,0 +1,12 @@ +--- +weight: 500 +title: Reference +_build: + list: false +--- + +# Reference + + + +{{< section depth=2 >}} diff --git a/docs/sources/next/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/next/set-up/set-up-distributed-k6/usage/scheduling-tests.md new file mode 100644 index 0000000000..02fc7503a5 --- /dev/null +++ b/docs/sources/next/set-up/set-up-distributed-k6/usage/scheduling-tests.md @@ -0,0 +1,106 @@ +--- +weight: 400 +title: Schedule k6 tests +--- + +# Schedule k6 tests + +While the k6 Operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. + +Running these tests requires a little more setup than a standalone test run. + +## Create a `ConfigMap` with k6 scripts + +Refer to [Run k6 scripts with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/) for details on how to create a `ConfigMap` with k6 scripts. 
+ +## Create a ConfigMap of the YAML file for the `TestRun` job + + + +When using the `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: + +```yaml +configMapGenerator: + - name: -config + files: + - .yaml +``` + +## Create a `ServiceAccount` for the `CronJob` + +For the `CronJob` to be able to create and delete `TestRun` objects, create a service account: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: k6- +rules: + - apiGroups: + - k6.io + resources: + - testruns + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k6- +roleRef: + kind: Role + name: k6- + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: k6- + namespace: +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k6- +``` + +## Create a `CronJob` + +This is an example of how to define a `CronJob` in a YAML file: + +```yaml +# snapshotter.yml +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: -cron +spec: + schedule: '' + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + spec: + serviceAccount: k6 + containers: + - name: kubectl + image: bitnami/kubectl + volumeMounts: + - name: k6-yaml + mountPath: /tmp/ + command: + - /bin/bash + args: + - -c + - 'kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml' + restartPolicy: OnFailure + volumes: + - name: k6-yaml + configMap: + name: -config +``` diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/_index.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/_index.md new file mode 100644 index 0000000000..06687090b0 --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/_index.md @@ -0,0 +1,19 @@ +--- +weight: 150 +title: Set up distributed k6 +--- + +# Set up distributed k6 + +It's possible to run large load tests even when using a single node, or single machine. 
But, depending on your use case, you might also want to run a distributed Grafana k6 test in your own infrastructure. + +A couple of reasons why you might want to do this: + +- You run your application in Kubernetes and would like k6 to be executed in the same fashion as all your other infrastructure components. +- You want to run your tests within your private network for security and/or privacy reasons. + +[k6 Operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. + +This section includes the following topics: + +{{< section depth=2 >}} diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/install-k6-operator.md new file mode 100644 index 0000000000..315ec003ca --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/install-k6-operator.md @@ -0,0 +1,114 @@ +--- +weight: 100 +title: Install k6 Operator +--- + +# Install k6 Operator + +This guide provides step-by-step instructions on how to install k6 Operator. + +## Before you begin + +To install k6 Operator, you'll need: + +- A Kubernetes cluster, along with access to it. +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +## Deploy the operator + +There are three different options that you can use to deploy the k6 Operator. + +### Deploy with bundle + +The easiest way to install the operator is with bundle: + +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - +``` + +Bundle includes default manifests for k6 Operator, including a `k6-operator-system` namespace and k6 Operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. + +### Deploy with Helm + +Helm releases of k6 Operator are published together with other Grafana Helm charts. 
You can install it with the following commands: + +```bash +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +helm install k6-operator grafana/k6-operator +``` + +You can also pass additional configuration options with a `values.yaml` file: + +```bash +helm install k6-operator grafana/k6-operator -f values.yaml +``` + +Refer to the [k6 Operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. + +You can find a complete list of Helm options in the [k6 Operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). + +### Deploy with Makefile + +In order to install the operator with a Makefile, you'll need: + +- [go](https://go.dev/doc/install) +- [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) + +A more manual, low-level way to install the k6 operator is by running the command below: + +```bash +make deploy +``` + +This method may be more useful for development of the k6 Operator, depending on specifics of the setup. + +## Install the CRD + +The k6 Operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: + +```bash +make install +``` + +{{< admonition type="warning" >}} + +The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in the future. We recommend using the `TestRun` CRD. + +{{< /admonition >}} + +## Watch namespace + +By default, the k6 Operator watches the `TestRun` and `PrivateLoadZone` custom resources in all namespaces. 
You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-operator-controller-manager + namespace: k6-operator-system +spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/grafana/k6-operator:controller-v0.0.14 + env: + - name: WATCH_NAMESPACE + value: 'some-ns' +# ... +``` + +## Uninstall k6 Operator + +You can remove all of the resources created by the k6 Operator with `bundle`: + +```bash +curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl delete -f - +``` + +Or with the `make` command: + +```bash +make delete +``` diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/troubleshooting.md new file mode 100644 index 0000000000..a8bf60698b --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/troubleshooting.md @@ -0,0 +1,260 @@ +--- +weight: 400 +title: Troubleshooting +--- + +# Troubleshooting + +This topic includes instructions to help you troubleshoot common issues with the k6 Operator. + +## Common tricks + +### Test your script locally + +Always run your script locally before trying to run it with the k6 Operator: + +```bash +k6 run script.js +``` + +If you're using environment variables or CLI options, pass them in as well: + +```bash +MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar +``` + +That ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally can help you check if the configured options are doing what you expect. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. 
+ +### `TestRun` deployment + +#### The pods + +In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, there are certain repeating patterns: + +1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, and `n` runners. +1. If any of these Jobs didn't result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: + + ```bash + kubectl get jobs -A + kubectl describe job mytest-initializer + ``` + +1. If one of the Pods was deployed but finished with `Error`, you can check its logs with the following command: + + ```bash + kubectl logs mytest-initializer-xxxxx + ``` + +If the Pods seem to be working but not producing an expected result and there's not enough information in the logs, you can use the k6 [verbose option](https://grafana.com/docs/k6//using-k6/k6-options/#options) in the `TestRun` spec: + +```yaml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample +spec: + parallelism: 2 + script: + configMap: + name: 'test' + file: 'test.js' + arguments: --verbose +``` + +#### k6 Operator + +Another source of info is the k6 Operator itself. It's deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from the previous section usually contain enough information to help you diagnose any issues. With the standard deployment, the logs of the k6 Operator can be checked with: + +```bash +kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx +``` + +#### Inspect `TestRun` resource + +After you deploy a `TestRun` CR, you can inspect it the same way as any other resource: + +```bash +kubectl describe testrun my-testrun +``` + +Firstly, check if the spec is as expected. 
Then, see the current status: + +```yaml +Status: + Conditions: + Last Transition Time: 2024-01-17T10:30:01Z + Message: + Reason: CloudTestRunFalse + Status: False + Type: CloudTestRun + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: TestRunPreparation + Status: Unknown + Type: TestRunRunning + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudTestRunAbortedFalse + Status: False + Type: CloudTestRunAborted + Last Transition Time: 2024-01-17T10:29:58Z + Message: + Reason: CloudPLZTestRunFalse + Status: False + Type: CloudPLZTestRun + Stage: error +``` + +If `Stage` is equal to `error`, you can check the logs of k6 Operator. + +Conditions can be used as a source of info as well, but it's a more advanced troubleshooting option that should be used if the previous steps weren't enough to diagnose the issue. Note that conditions that start with the `Cloud` prefix only matter in the setting of k6 Cloud test runs, for example, for cloud output and PLZ test runs. + +### `PrivateLoadZone` deployment + +If the `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. + +First, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. + +Unlike `TestRun` deployment, when a `PrivateLoadZone` is first created, there are no additional resources deployed. So, the only source for troubleshooting are the logs of k6 Operator. See the [previous subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to diagnose the issue. Refer to [PrivateLoadZone: subscription error](#privateloadzone-subscription-error) for more details. 
+ +### Running tests in `PrivateLoadZone` + +Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by the k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If the test is misbehaving, for example, it errors out, or doesn't produce the expected result, then you can check: + +1. If there are any messages in the GCk6 UI. +2. If there are any messages in the output of the `k6 cloud` command. +3. The resources and their logs, the same way as with a [standalone `TestRun` deployment](#testrun-deployment) + +## Common scenarios + +### Issues with environment variables + +Refer to [Environment variables](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md) for details on how to pass environment variables to the k6 Operator. + +### Tags not working + +Tags are a rather common source of errors when using the k6 Operator. For example, the following tags would lead to parsing errors: + +```yaml + arguments: --tag product_id="Test A" + # or + arguments: --tag foo=\"bar\" +``` + +You can see those errors in the logs of either the initializer or the runner Pod, for example: + +```bash +time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" +``` + +This is a common problem with escaping the characters. You can find an [issue](https://github.com/grafana/k6-operator/issues/211) in the k6 Operator repository that can be upvoted. + +### Initializer logs an error but it's not about tags + +This can happen because of lack of attention to the [preparation](#preparation) step. One command that you can use to help diagnose issues with your script is the following: + +```bash +k6 inspect --execution-requirements script.js +``` + +That command is a shortened version of what the initializer Pod is executing. 
If the command produces an error, there's a problem with the script itself and it should be solved outside of the k6 Operator. The error itself may contain a hint to what's wrong, such as a syntax error. + +If the standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. A couple of recommendations here are: + +- Review the output of the initializer Pod: is it logged by the k6 process or by something else? + - :information_source: k6 Operator expects the initializer logs to contain only the output of `k6 inspect`. If there are any other log lines present, then the k6 Operator will fail to parse it and the test won't start. Refer to this [issue](https://github.com/grafana/k6-operator/issues/193) for more details. +- Check events in the initializer Job and Pod as they may contain another hint about what's wrong. + +### Non-existent ServiceAccount + +A ServiceAccount can be defined as `serviceAccountName` in a PrivateLoadZone, and as `runner.serviceAccountName` in a TestRun CRD. If the specified ServiceAccount doesn't exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and the k6 Operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job: + +```bash +kubectl describe job plz-test-xxxxxx-initializer +... +Events: + Warning FailedCreate 57s (x4 over 2m7s) job-controller Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found +``` + +k6 Operator doesn't try to analyze such scenarios on its own, but you can refer to the following [issue](https://github.com/grafana/k6-operator/issues/260) for improvements. + +#### How to fix + +To fix this issue, the incorrect `serviceAccountName` must be corrected, and the TestRun or PrivateLoadZone resource must be re-deployed. 
+ +### Non-existent `nodeSelector` + +`nodeSelector` can be defined as `nodeSelector` in a PrivateLoadZone, and as `runner.nodeSelector` in the TestRun CRD. + +This case is very similar to the [ServiceAccount](#non-existent-serviceaccount): the Pod creation will fail, but the error is slightly different: + +```bash +kubectl describe pod plz-test-xxxxxx-initializer-xxxxx +... +Events: + Warning FailedScheduling 48s (x5 over 4m6s) default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. +``` + +#### How to fix + +To fix this issue, the incorrect `nodeSelector` must be corrected and the TestRun or PrivateLoadZone resource must be re-deployed. + +### Insufficient resources + +A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There's a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on the maximum number of nodes, and can't provide the required resources on time or at all. + +This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it's possible to fix the issue with insufficient resources on-the-fly, for example, by adding more nodes, k6 Operator will attempt to continue executing a test run. 
+ +### OOM of a runner Pod + +If there's at least one runner Pod that OOM-ed, the whole test will be [stuck](https://github.com/grafana/k6-operator/issues/251) and will have to be deleted manually: + +```bash +kubectl -f my-test.yaml delete +# or +kubectl delete testrun my-test +``` + +In case of OOM, it makes sense to review the k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set the `spec.runner.resources` in the TestRun CRD, or `spec.resources` in the PrivateLoadZone CRD accordingly. + +### PrivateLoadZone: subscription error + +If there's an issue with your Grafana Cloud k6 subscription, there will be a 400 error in the logs with the message detailing the problem. For example: + +```bash +"Received error `(400) You have reached the maximum Number of private load zones your organization is allowed to have. Please contact support if you want to create more.`. Message from server ``" +``` + +To fix this issue, check your organization settings in Grafana Cloud k6 or contact Support. + +### PrivateLoadZone: Wrong token + +There can be two major problems with the authentication token: + +1. If the token wasn't created, or was created in a wrong location, the logs will show the following error: + + ```bash + Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} + ``` + +2. If the token contains a corrupted value, or it's not an organizational token, the logs will show the following error: + + ```bash + "Received error `(403) Authentication token incorrect or expired`. Message from server ``" + ``` + +### PrivateLoadZone: Networking setup + +If you see any dial or connection errors in the logs of the k6 Operator, it makes sense to double-check the networking setup. 
For a PrivateLoadZone to operate, outbound traffic to Grafana Cloud k6 [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). To check the reachability of Grafana Cloud k6 endpoints: + +```bash +kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml +kubectl exec -it dnsutils -- nslookup ingest.k6.io +kubectl exec -it dnsutils -- nslookup api.k6.io +``` + +For more resources on troubleshooting networking, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). + +### PrivateLoadZone: Insufficient resources + +The PrivateLoadZone insufficient resources problem is similar to [insufficient resources issue](#insufficient-resources). But, when running a PrivateLoadZone test, the k6 Operator will wait only for a timeout period. When the timeout period is up, the test will be aborted by Grafana Cloud k6 and marked as such, both in the PrivateLoadZone and in Grafana Cloud k6. In other words, there is a time limit to fix this issue without restarting the test run. 
diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md new file mode 100644 index 0000000000..2a46ef392e --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md @@ -0,0 +1,10 @@ +--- +weight: 200 +title: Upgrade k6 Operator +_build: + list: false +--- + +# Upgrade k6 Operator + + diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/_index.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/_index.md new file mode 100644 index 0000000000..48ddb3b67c --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/_index.md @@ -0,0 +1,10 @@ +--- +weight: 300 +title: Usage +--- + +# Usage + +This section includes the following topics: + +{{< section depth=2 >}} diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/common-options.md new file mode 100644 index 0000000000..43d95d8625 --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/common-options.md @@ -0,0 +1,57 @@ +--- +weight: 300 +title: Common options +--- + +# Common options + + + +The only options that are required as part of the `TestRun` CRD spec are `script` and `parallelism`. This guide covers some of the most common options. + +## Parallelism + +`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6 Operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. + +## Separate + +`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. 
+ +## Service account + +If you want to use a custom Service Account you'll need to pass it into both the starter and the runner object: + +```yaml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: +spec: + script: + configMap: + name: '' + runner: + serviceAccountName: + starter: + serviceAccountName: +``` + +## Runner + +Defines options for the test runner pods. The non-exhaustive list includes: + +- Passing resource limits and requests. +- Passing in labels and annotations. +- Passing in affinity and anti-affinity. +- Passing in a custom image. + +## Starter + +Defines options for the starter pod. The non-exhaustive list includes: + +- Passing in a custom image. +- Passing in labels and annotations. + +## Initializer + +By default, the initializer Job is defined with the same options as the runner Jobs, but its options can be overwritten by setting `.spec.initializer`. diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md new file mode 100644 index 0000000000..64a7c626cb --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md @@ -0,0 +1,219 @@ +--- +weight: 100 +title: Run k6 scripts with TestRun CRD +--- + +# Run k6 scripts with TestRun CRD + +This guide covers how you can configure your k6 scripts to run using the k6 Operator. + +## Defining test scripts + +There are several ways to configure scripts in the `TestRun` CRD. The operator uses `configMap`, `volumeClaim` and `localFile` to serve test scripts to the jobs. 
+ +### ConfigMap + +The main way to configure a script is to create a `ConfigMap` with the script contents: + +```bash +kubectl create configmap my-test --from-file /path/to/my/test.js +``` + +Then specify it in `TestRun`: + +```yaml + script: + configMap: + name: my-test + file: test.js +``` + +{{< admonition type="note" >}} + +A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `localFile` instead. + +{{< /admonition >}} + +### VolumeClaim + +If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting files, you can pass it to the test like this: + +```yaml +spec: + script: + volumeClaim: + name: 'stress-test-volumeClaim' + # test.js should exist inside /test/ folder. + # All the js files and directories test.js is importing + # should be inside the same directory as well. + file: 'test.js' +``` + +The pods will expect to find the script files in the `/test/` folder. If `volumeClaim` fails, that's the first place to check. The latest initializer pod doesn't generate any logs and when it can't find the file, it exits with an error. Refer to [this GitHub issue](https://github.com/grafana/k6-operator/issues/143) for potential improvements. 
+ +#### Sample directory structure + +``` +├── test +│ ├── requests +│ │ ├── stress-test.js +│ ├── test.js +``` + +In the preceding example, `test.js` imports a function from `stress-test.js` and these files would look like this: + +```js +// test.js +import stressTest from './requests/stress-test.js'; + +export const options = { + vus: 50, + duration: '10s', +}; + +export default function () { + stressTest(); +} +``` + +```js +// stress-test.js +import { sleep, check } from 'k6'; +import http from 'k6/http'; + +export default () => { + const res = http.get('https://test-api.k6.io'); + check(res, { + 'status is 200': () => res.status === 200, + }); + sleep(1); +}; +``` + +### LocalFile + +If the script is present in the filesystem of a custom runner image, it can be accessed with the `localFile` option: + +```yaml +spec: + parallelism: 4 + script: + localFile: /test/test.js + runner: + image: +``` + +{{< admonition type="note" >}} + +If there is any limitation on the usage of `volumeClaim` in your cluster, you can use the `localFile` option. We recommend using `volumeClaim` if possible. + +{{< /admonition >}} + +### Multi-file tests + +In case your k6 script is split between multiple JavaScript files, you can create a `ConfigMap` with several data entries like this: + +```bash +kubectl create configmap scenarios-test --from-file test.js --from-file utils.js +``` + +If there are too many files to specify manually, using `kubectl` with a folder might be an option as well: + +```bash +kubectl create configmap scenarios-test --from-file=./test +``` + +Alternatively, you can create an archive with k6: + +```bash +k6 archive test.js [args] +``` + +The `k6 archive` command creates an `archive.tar` in your current folder. 
You can then use that file in the `configmap`, similarly to a JavaScript script: + +```bash +kubectl create configmap scenarios-test --from-file=archive.tar +``` + +If you use an archive, you must edit your YAML file for the `TestRun` deployment so that the `file` option is set to the correct entrypoint for the `k6 run` command: + +```yaml +# ... +spec: + script: + configMap: + name: 'crocodile-stress-test' + file: 'archive.tar' # <-- change here +``` + +## Run tests + +Tests are executed by applying the custom resource `TestRun` to a cluster where the k6 Operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: + +```yaml +# k6-resource.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample +spec: + parallelism: 4 + script: + configMap: + name: k6-test + file: test.js + separate: false + runner: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + runAsNonRoot: true + resources: + limits: + cpu: 200m + memory: 1000Mi + requests: + cpu: 100m + memory: 500Mi + starter: + image: + metadata: + labels: + cool-label: foo + annotations: + cool-annotation: bar + securityContext: + runAsUser: 2000 + runAsGroup: 2000 + runAsNonRoot: true +``` + +A `TestRun` CR is created with this command: + +```bash +kubectl apply -f /path/to/your/k6-resource.yml +``` + +## Clean up resources + +After completing a test run, you need to clean up the test jobs that were created: + +```bash +kubectl delete -f /path/to/your/k6-resource.yml +``` + +Alternatively, you can configure the automatic deletion of all resources with the `cleanup` option: + +```yaml +spec: + cleanup: 'post' +``` + +With the `cleanup` option set, k6 Operator removes the `TestRun` CRD and all created resources once the test run ends. 
diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/extensions.md new file mode 100644 index 0000000000..e654032755 --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/extensions.md @@ -0,0 +1,61 @@ +--- +weight: 200 +title: Use k6 Operator with k6 extensions +--- + +# Use k6 Operator with k6 extensions + +By default, the k6 Operator uses `ghcr.io/grafana/k6-operator:latest-runner` as the container image for the test jobs. + +If you want to use k6 [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6), you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. + +For example, this is a `Dockerfile` that builds a k6 binary with the `xk6-output-influxdb` extension: + +```Dockerfile +# Build the k6 binary with the extension +FROM golang:1.20 as builder + +RUN go install go.k6.io/xk6/cmd/xk6@latest + +# For our example, we'll add support for output of test metrics to InfluxDB v2. +# Feel free to add other extensions using the '--with ...'. +RUN xk6 build \ + --with github.com/grafana/xk6-output-influxdb@latest \ + --output /k6 + +# Use the operator's base image and override the k6 binary +FROM grafana/k6:latest +COPY --from=builder /k6 /usr/bin/k6 +``` + +You can build the image based on this `Dockerfile` by executing: + +```bash +docker build -t k6-extended:local . +``` + +After the build completes, you can push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. 
+ +You can then use that image as follows: + +```yaml +# k6-resource-with-extensions.yml + +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: k6-sample-with-extensions +spec: + parallelism: 4 + script: + configMap: + name: my-stress-test + file: test.js + runner: + image: k6-extended:local + env: + - name: K6_OUT + value: xk6-influxdb=http://influxdb.somewhere:8086/demo +``` + +Note that this example overrides the default image with `k6-extended:local`, and it includes environment variables that are required by the `xk6-output-influxdb` extension. diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md new file mode 100644 index 0000000000..600decab73 --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md @@ -0,0 +1,73 @@ +--- +weight: 250 +title: Use the k6 Operator with Grafana Cloud k6 +--- + +# Use the k6 Operator with Grafana Cloud k6 + +Grafana Cloud k6 is the Grafana Cloud offering of k6, which gives you access to all of k6's capabilities, while Grafana handles the infrastructure, storage, and metrics aggregation and insights from your tests. + +When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric storage and analysis that the platform offers. + +There are two ways to use the k6 Operator with Grafana Cloud k6: Private Load Zones and Cloud output. + +## Before you begin + +To use the k6 Operator with Grafana Cloud k6, you’ll need: + +- A [Grafana Cloud account](https://grafana.com/auth/sign-up/create-user). + +## Private Load Zones + +Private Load Zones (PLZ) are load zones that you can host inside your network by using the k6 Operator. You can start a cloud test in a PLZ by referencing it by name from your script, and the test will run in the nodes of your Kubernetes cluster. 
+ +Refer to [Set up private load zones](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/private-load-zone-v2/) for more details. + +## Cloud output + +With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in the k6 Operator if you have a Grafana Cloud account. + +{{< admonition type="note" >}} + +The cloud output option only supports a `parallelism` value of 20 or less. + +{{< /admonition >}} + +To use this option in k6 Operator, set the argument in YAML: + +```yaml +# ... +script: + configMap: + name: '' +arguments: --out cloud +# ... +``` + +Then, if you installed operator with bundle or Helm, create a secret with the following command: + +```bash +kubectl -n k6-operator-system create secret generic my-cloud-token \ + --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" +``` + +Alternatively, if you installed operator with a Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: + +```yaml +# Uncomment this section if you need cloud output and copy-paste your token +secretGenerator: + - name: cloud-token + literals: + - token= + options: + annotations: + kubernetes.io/service-account.name: k6-operator-controller + labels: + k6cloud: token +``` + +After updating the file, run `make deploy`. + +After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. + +Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. 
diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/reference.md new file mode 100644 index 0000000000..f6f8b6d06f --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/reference.md @@ -0,0 +1,12 @@ +--- +weight: 500 +title: Reference +_build: + list: false +--- + +# Reference + + + +{{< section depth=2 >}} diff --git a/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md new file mode 100644 index 0000000000..02fc7503a5 --- /dev/null +++ b/docs/sources/v0.52.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md @@ -0,0 +1,106 @@ +--- +weight: 400 +title: Schedule k6 tests +--- + +# Schedule k6 tests + +While the k6 Operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. + +Running these tests requires a little more setup than a standalone test run. + +## Create a `ConfigMap` with k6 scripts + +Refer to [Run k6 scripts with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/) for details on how to create a `ConfigMap` with k6 scripts. 
+ +## Create a ConfigMap of the YAML file for the `TestRun` job + + + +When using the `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: + +```yaml +configMapGenerator: + - name: -config + files: + - .yaml +``` + +## Create a `ServiceAccount` for the `CronJob` + +For the `CronJob` to be able to create and delete `TestRun` objects, create a service account: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: k6- +rules: + - apiGroups: + - k6.io + resources: + - testruns + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k6- +roleRef: + kind: Role + name: k6- + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: k6- + namespace: +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k6- +``` + +## Create a `CronJob` + +This is an example of how to define a `CronJob` in a YAML file: + +```yaml +# snapshotter.yml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: -cron +spec: + schedule: '' + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + spec: + serviceAccountName: k6 + containers: + - name: kubectl + image: bitnami/kubectl + volumeMounts: + - name: k6-yaml + mountPath: /tmp/ + command: + - /bin/bash + args: + - -c + - 'kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml' + restartPolicy: OnFailure + volumes: + - name: k6-yaml + configMap: + name: -config +``` From 63606943d0de9db068ca8c3cb6c9c3f1220ec27f Mon Sep 17 00:00:00 2001 From: Heitor Tashiro Sergent Date: Fri, 19 Jul 2024 15:14:20 -0500 Subject: [PATCH 23/23] Remove docs from v0.50.x folder --- .../set-up/set-up-distributed-k6/_index.md | 19 -- .../install-k6-operator.md | 114 -------- .../set-up-distributed-k6/troubleshooting.md | 260 ------------------ .../upgrade-k6-operator.md | 10 - .../set-up-distributed-k6/usage/_index.md | 10 - 
.../usage/common-options.md | 57 ---- .../executing-k6-scripts-with-testrun-crd.md | 219 --------------- .../set-up-distributed-k6/usage/extensions.md | 61 ---- .../usage/k6-operator-to-gck6.md | 73 ----- .../set-up-distributed-k6/usage/reference.md | 12 - .../usage/scheduling-tests.md | 106 ------- 11 files changed, 941 deletions(-) delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md delete mode 100644 docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md deleted file mode 100644 index 06687090b0..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -weight: 150 -title: Set up distributed k6 ---- - -# Set up distributed k6 - -It's possible to run large load tests even when using a single node, or single machine. But, depending on your use case, you might also want to run a distributed Grafana k6 test in your own infrastructure. 
- -A couple of reasons why you might want to do this: - -- You run your application in Kubernetes and would like k6 to be executed in the same fashion as all your other infrastructure components. -- You want to run your tests within your private network for security and/or privacy reasons. - -[k6 Operator](https://github.com/grafana/k6-operator) is a Kubernetes operator that you can use to run distributed k6 tests in your cluster. - -This section includes the following topics: - -{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md deleted file mode 100644 index 315ec003ca..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/install-k6-operator.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -weight: 100 -title: Install k6 Operator ---- - -# Install k6 Operator - -This guide provides step-by-step instructions on how to install k6 Operator. - -## Before you begin - -To install k6 Operator, you'll need: - -- A Kubernetes cluster, along with access to it. -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). - -## Deploy the operator - -There are three different options that you can use to deploy the k6 Operator. - -### Deploy with bundle - -The easiest way to install the operator is with bundle: - -```bash -curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl apply -f - -``` - -Bundle includes default manifests for k6 Operator, including a `k6-operator-system` namespace and k6 Operator deployment with the latest tagged Docker image. Customizations can be made on top of this manifest as needed, for example, with `kustomize`. - -### Deploy with Helm - -Helm releases of k6 Operator are published together with other Grafana Helm charts. 
You can install it with the following commands: - -```bash -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -helm install k6-operator grafana/k6-operator -``` - -You can also pass additional configuration options with a `values.yaml` file: - -```bash -helm install k6-operator grafana/k6-operator -f values.yaml -``` - -Refer to the [k6 Operator samples folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/samples/customAnnotationsAndLabels.yaml) for an example file. - -You can find a complete list of Helm options in the [k6 Operator charts folder](https://github.com/grafana/k6-operator/blob/main/charts/k6-operator/README.md). - -### Deploy with Makefile - -In order to install the operator with a Makefile, you'll need: - -- [go](https://go.dev/doc/install) -- [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) - -A more manual, low-level way to install the k6 operator is by running the command below: - -```bash -make deploy -``` - -This method may be more useful for development of the k6 Operator, depending on specifics of the setup. - -## Install the CRD - -The k6 Operator includes custom resources called `TestRun`, `PrivateLoadZone`, and `K6`. They're automatically installed when you do a deployment or install a bundle, but you can also manually install them by running: - -```bash -make install -``` - -{{< admonition type="warning" >}} - -The `K6` CRD has been replaced by the `TestRun` CRD and will be deprecated in the future. We recommend using the `TestRun` CRD. - -{{< /admonition >}} - -## Watch namespace - -By default, the k6 Operator watches the `TestRun` and `PrivateLoadZone` custom resources in all namespaces. 
You can also configure the k6 Operator to watch a specific namespace by setting the `WATCH_NAMESPACE` environment variable for the operator's deployment: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: k6-operator-controller-manager - namespace: k6-operator-system -spec: - template: - spec: - containers: - - name: manager - image: ghcr.io/grafana/k6-operator:controller-v0.0.14 - env: - - name: WATCH_NAMESPACE - value: 'some-ns' -# ... -``` - -## Uninstall k6 Operator - -You can remove all of the resources created by the k6 Operator with `bundle`: - -```bash -curl https://raw.githubusercontent.com/grafana/k6-operator/main/bundle.yaml | kubectl delete -f - -``` - -Or with the `make` command: - -```bash -make delete -``` diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md deleted file mode 100644 index a8bf60698b..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/troubleshooting.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -weight: 400 -title: Troubleshooting ---- - -# Troubleshooting - -This topic includes instructions to help you troubleshoot common issues with the k6 Operator. - -## Common tricks - -### Test your script locally - -Always run your script locally before trying to run it with the k6 Operator: - -```bash -k6 run script.js -``` - -If you're using environment variables or CLI options, pass them in as well: - -```bash -MY_ENV_VAR=foo k6 run script.js --tag my_tag=bar -``` - -That ensures that the script has correct syntax and can be parsed with k6 in the first place. Additionally, running locally can help you check if the configured options are doing what you expect. If there are any errors or unexpected results in the output of `k6 run`, make sure to fix those prior to deploying the script elsewhere. 
- -### `TestRun` deployment - -#### The pods - -In case of one `TestRun` Custom Resource (CR) creation with `parallelism: n`, there are certain repeating patterns: - -1. There will be `n + 2` Jobs (with corresponding Pods) created: initializer, starter, and `n` runners. -1. If any of these Jobs didn't result in a Pod being deployed, there must be an issue with that Job. Some commands that can help here: - - ```bash - kubectl get jobs -A - kubectl describe job mytest-initializer - ``` - -1. If one of the Pods was deployed but finished with `Error`, you can check its logs with the following command: - - ```bash - kubectl logs mytest-initializer-xxxxx - ``` - -If the Pods seem to be working but not producing an expected result and there's not enough information in the logs, you can use the k6 [verbose option](https://grafana.com/docs/k6//using-k6/k6-options/#options) in the `TestRun` spec: - -```yaml -apiVersion: k6.io/v1alpha1 -kind: TestRun -metadata: - name: k6-sample -spec: - parallelism: 2 - script: - configMap: - name: 'test' - file: 'test.js' - arguments: --verbose -``` - -#### k6 Operator - -Another source of info is the k6 Operator itself. It's deployed as a Kubernetes `Deployment`, with `replicas: 1` by default, and its logs together with observations about the Pods from the previous section usually contain enough information to help you diagnose any issues. With the standard deployment, the logs of the k6 Operator can be checked with: - -```bash -kubectl -n k6-operator-system -c manager logs k6-operator-controller-manager-xxxxxxxx-xxxxx -``` - -#### Inspect `TestRun` resource - -After you deploy a `TestRun` CR, you can inspect it the same way as any other resource: - -```bash -kubectl describe testrun my-testrun -``` - -Firstly, check if the spec is as expected. 
Then, see the current status: - -```yaml -Status: - Conditions: - Last Transition Time: 2024-01-17T10:30:01Z - Message: - Reason: CloudTestRunFalse - Status: False - Type: CloudTestRun - Last Transition Time: 2024-01-17T10:29:58Z - Message: - Reason: TestRunPreparation - Status: Unknown - Type: TestRunRunning - Last Transition Time: 2024-01-17T10:29:58Z - Message: - Reason: CloudTestRunAbortedFalse - Status: False - Type: CloudTestRunAborted - Last Transition Time: 2024-01-17T10:29:58Z - Message: - Reason: CloudPLZTestRunFalse - Status: False - Type: CloudPLZTestRun - Stage: error -``` - -If `Stage` is equal to `error`, you can check the logs of k6 Operator. - -Conditions can be used as a source of info as well, but it's a more advanced troubleshooting option that should be used if the previous steps weren't enough to diagnose the issue. Note that conditions that start with the `Cloud` prefix only matter in the setting of k6 Cloud test runs, for example, for cloud output and PLZ test runs. - -### `PrivateLoadZone` deployment - -If the `PrivateLoadZone` CR was successfully created in Kubernetes, it should become visible in your account in Grafana Cloud k6 (GCk6) interface soon afterwards. If it doesn't appear in the UI, then there is likely a problem to troubleshoot. - -First, go over the [guide](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/) to double-check if all the steps have been done correctly and successfully. - -Unlike `TestRun` deployment, when a `PrivateLoadZone` is first created, there are no additional resources deployed. So, the only source for troubleshooting are the logs of k6 Operator. See the [previous subsection](#k6-operator) on how to access its logs. Any errors there might be a hint to diagnose the issue. Refer to [PrivateLoadZone: subscription error](#privateloadzone-subscription-error) for more details. 
- -### Running tests in `PrivateLoadZone` - -Each time a user runs a test in a PLZ, for example with `k6 cloud script.js`, there is a corresponding `TestRun` being deployed by the k6 Operator. This `TestRun` will be deployed in the same namespace as its `PrivateLoadZone`. If the test is misbehaving, for example, it errors out, or doesn't produce the expected result, then you can check: - -1. If there are any messages in the GCk6 UI. -2. If there are any messages in the output of the `k6 cloud` command. -3. The resources and their logs, the same way as with a [standalone `TestRun` deployment](#testrun-deployment) - -## Common scenarios - -### Issues with environment variables - -Refer to [Environment variables](https://github.com/grafana/k6-operator/blob/main/docs/env-vars.md) for details on how to pass environment variables to the k6 Operator. - -### Tags not working - -Tags are a rather common source of errors when using the k6 Operator. For example, the following tags would lead to parsing errors: - -```yaml - arguments: --tag product_id="Test A" - # or - arguments: --tag foo=\"bar\" -``` - -You can see those errors in the logs of either the initializer or the runner Pod, for example: - -```bash -time="2024-01-11T11:11:27Z" level=error msg="invalid argument \"product_id=\\\"Test\" for \"--tag\" flag: parse error on line 1, column 12: bare \" in non-quoted-field" -``` - -This is a common problem with escaping the characters. You can find an [issue](https://github.com/grafana/k6-operator/issues/211) in the k6 Operator repository that can be upvoted. - -### Initializer logs an error but it's not about tags - -This can happen because of lack of attention to the [preparation](#preparation) step. One command that you can use to help diagnose issues with your script is the following: - -```bash -k6 inspect --execution-requirements script.js -``` - -That command is a shortened version of what the initializer Pod is executing. 
If the command produces an error, there's a problem with the script itself and it should be solved outside of the k6 Operator. The error itself may contain a hint to what's wrong, such as a syntax error. - -If the standalone `k6 inspect --execution-requirements` executes successfully, then it's likely a problem with `TestRun` deployment specific to your Kubernetes setup. A couple of recommendations here are: - -- Review the output of the initializer Pod: is it logged by the k6 process or by something else? - - :information_source: k6 Operator expects the initializer logs to contain only the output of `k6 inspect`. If there are any other log lines present, then the k6 Operator will fail to parse it and the test won't start. Refer to this [issue](https://github.com/grafana/k6-operator/issues/193) for more details. -- Check events in the initializer Job and Pod as they may contain another hint about what's wrong. - -### Non-existent ServiceAccount - -A ServiceAccount can be defined as `serviceAccountName` in a PrivateLoadZone, and as `runner.serviceAccountName` in a TestRun CRD. If the specified ServiceAccount doesn't exist, k6 Operator will successfully create Jobs but corresponding Pods will fail to be deployed, and the k6 Operator will wait indefinitely for Pods to be `Ready`. This error can be best seen in the events of the Job: - -```bash -kubectl describe job plz-test-xxxxxx-initializer -... -Events: - Warning FailedCreate 57s (x4 over 2m7s) job-controller Error creating: pods "plz-test-xxxxxx-initializer-" is forbidden: error looking up service account plz-ns/plz-sa: serviceaccount "plz-sa" not found -``` - -k6 Operator doesn't try to analyze such scenarios on its own, but you can refer to the following [issue](https://github.com/grafana/k6-operator/issues/260) for improvements. - -#### How to fix - -To fix this issue, the incorrect `serviceAccountName` must be corrected, and the TestRun or PrivateLoadZone resource must be re-deployed. 
- -### Non-existent `nodeSelector` - -`nodeSelector` can be defined as `nodeSelector` in a PrivateLoadZone, and as `runner.nodeSelector` in the TestRun CRD. - -This case is very similar to the [ServiceAccount](#non-existent-serviceaccount): the Pod creation will fail, but the error is slightly different: - -```bash -kubectl describe pod plz-test-xxxxxx-initializer-xxxxx -... -Events: - Warning FailedScheduling 48s (x5 over 4m6s) default-scheduler 0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. -``` - -#### How to fix - -To fix this issue, the incorrect `nodeSelector` must be corrected and the TestRun or PrivateLoadZone resource must be re-deployed. - -### Insufficient resources - -A related problem can happen when the cluster does not have sufficient resources to deploy the runners. There's a higher probability of hitting this issue when setting small CPU and memory limits for runners or using options like `nodeSelector`, `runner.affinity` or `runner.topologySpreadConstraints`, and not having a set of nodes matching the spec. Alternatively, it can happen if there is a high number of runners required for the test (via `parallelism` in TestRun or during PLZ test run) and autoscaling of the cluster has limits on the maximum number of nodes, and can't provide the required resources on time or at all. - -This case is somewhat similar to the previous two: the k6 Operator will wait indefinitely and can be monitored with events in Jobs and Pods. If it's possible to fix the issue with insufficient resources on-the-fly, for example, by adding more nodes, k6 Operator will attempt to continue executing a test run. 
- -### OOM of a runner Pod - -If there's at least one runner Pod that OOM-ed, the whole test will be [stuck](https://github.com/grafana/k6-operator/issues/251) and will have to be deleted manually: - -```bash -kubectl -f my-test.yaml delete -# or -kubectl delete testrun my-test -``` - -In case of OOM, it makes sense to review the k6 script to understand what kind of resource usage this script requires. It may be that the k6 script can be improved to be more performant. Then, set the `spec.runner.resources` in the TestRun CRD, or `spec.resources` in the PrivateLoadZone CRD accordingly. - -### PrivateLoadZone: subscription error - -If there's an issue with your Grafana Cloud k6 subscription, there will be a 400 error in the logs with the message detailing the problem. For example: - -```bash -"Received error `(400) You have reached the maximum Number of private load zones your organization is allowed to have. Please contact support if you want to create more.`. Message from server ``" -``` - -To fix this issue, check your organization settings in Grafana Cloud k6 or contact Support. - -### PrivateLoadZone: Wrong token - -There can be two major problems with the authentication token: - -1. If the token wasn't created, or was created in a wrong location, the logs will show the following error: - - ```bash - Failed to load k6 Cloud token {"namespace": "plz-ns", "name": "my-plz", "reconcileID": "67c8bc73-f45b-4c7f-a9ad-4fd0ffb4d5f6", "name": "token-with-wrong-name", "secretNamespace": "plz-ns", "error": "Secret \"token-with-wrong-name\" not found"} - ``` - -2. If the token contains a corrupted value, or it's not an organizational token, the logs will show the following error: - - ```bash - "Received error `(403) Authentication token incorrect or expired`. Message from server ``" - ``` - -### PrivateLoadZone: Networking setup - -If you see any dial or connection errors in the logs of the k6 Operator, it makes sense to double-check the networking setup. 
For a PrivateLoadZone to operate, outbound traffic to Grafana Cloud k6 [must be allowed](https://grafana.com/docs/grafana-cloud/k6/author-run/private-load-zone-v2/#before-you-begin). To check the reachability of Grafana Cloud k6 endpoints: - -```bash -kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml -kubectl exec -it dnsutils -- nslookup ingest.k6.io -kubectl exec -it dnsutils -- nslookup api.k6.io -``` - -For more resources on troubleshooting networking, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/). - -### PrivateLoadZone: Insufficient resources - -The PrivateLoadZone insufficient resources problem is similar to [insufficient resources issue](#insufficient-resources). But, when running a PrivateLoadZone test, the k6 Operator will wait only for a timeout period. When the timeout period is up, the test will be aborted by Grafana Cloud k6 and marked as such, both in the PrivateLoadZone and in Grafana Cloud k6. In other words, there is a time limit to fix this issue without restarting the test run. 
diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md deleted file mode 100644 index 2a46ef392e..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/upgrade-k6-operator.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -weight: 200 -title: Upgrade k6 Operator -_build: - list: false ---- - -# Upgrade k6 Operator - - diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md deleted file mode 100644 index 48ddb3b67c..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -weight: 300 -title: Usage ---- - -# Usage - -This section includes the following topics: - -{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md deleted file mode 100644 index 43d95d8625..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/common-options.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -weight: 300 -title: Common options ---- - -# Common options - - - -The only options that are required as part of the `TestRun` CRD spec are `script` and `parallelism`. This guide covers some of the most common options. - -## Parallelism - -`parallelism` defines how many instances of k6 runners you want to create. Each instance is assigned an equal execution segment. For instance, if your test script is configured to run 200 VUs and `parallelism` is set to 4, the k6 Operator creates four k6 jobs, each running 50 VUs to achieve the desired VU count. - -## Separate - -`separate: true` indicates that the jobs created need to be distributed across different nodes. This is useful if you're running a test with a really high VU count and want to make sure the resources of each node won't become a bottleneck. 
- -## Service account - -If you want to use a custom Service Account you'll need to pass it into both the starter and the runner object: - -```yaml -apiVersion: k6.io/v1alpha1 -kind: TestRun -metadata: - name: -spec: - script: - configMap: - name: '' - runner: - serviceAccountName: - starter: - serviceAccountName: -``` - -## Runner - -Defines options for the test runner pods. The non-exhaustive list includes: - -- Passing resource limits and requests. -- Passing in labels and annotations. -- Passing in affinity and anti-affinity. -- Passing in a custom image. - -## Starter - -Defines options for the starter pod. The non-exhaustive list includes: - -- Passing in a custom image. -- Passing in labels and annotations. - -## Initializer - -By default, the initializer Job is defined with the same options as the runner Jobs, but its options can be overwritten by setting `.spec.initializer`. diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md deleted file mode 100644 index 64a7c626cb..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -weight: 100 -title: Run k6 scripts with TestRun CRD ---- - -# Run k6 scripts with TestRun CRD - -This guide covers how you can configure your k6 scripts to run using the k6 Operator. - -## Defining test scripts - -There are several ways to configure scripts in the `TestRun` CRD. The operator uses `configMap`, `volumeClaim` and `localFile` to serve test scripts to the jobs. 
- -### ConfigMap - -The main way to configure a script is to create a `ConfigMap` with the script contents: - -```bash -kubectl create configmap my-test --from-file /path/to/my/test.js -``` - -Then specify it in `TestRun`: - -```bash - script: - configMap: - name: my-test - file: test.js -``` - -{{< admonition type="note" >}} - -A single `ConfigMap` has a character limit of 1048576 bytes. If you need to have a larger test file, you have to use a `volumeClaim` or a `localFile` instead. - -{{< /admonition >}} - -### VolumeClaim - -If you have a PVC with the name `stress-test-volumeClaim` containing your script and any other supporting files, you can pass it to the test like this: - -```yaml -spec: - script: - volumeClaim: - name: 'stress-test-volumeClaim' - # test.js should exist inside /test/ folder. - # All the js files and directories test.js is importing - # should be inside the same directory as well. - file: 'test.js' -``` - -The pods will expect to find the script files in the `/test/` folder. If `volumeClaim` fails, that's the first place to check. The latest initializer pod doesn't generate any logs and when it can't find the file, it exits with an error. Refer to [this GitHub issue](https://github.com/grafana/k6-operator/issues/143) for potential improvements. 
- -#### Sample directory structure - -``` -├── test -│ ├── requests -│ │ ├── stress-test.js -│ ├── test.js -``` - -In the preceding example, `test.js` imports a function from `stress-test.js` and these files would look like this: - -```js -// test.js -import stressTest from './requests/stress-test.js'; - -export const options = { - vus: 50, - duration: '10s', -}; - -export default function () { - stressTest(); -} -``` - -```js -// stress-test.js -import { sleep, check } from 'k6'; -import http from 'k6/http'; - -export default () => { - const res = http.get('https://test-api.k6.io'); - check(res, { - 'status is 200': () => res.status === 200, - }); - sleep(1); -}; -``` - -### LocalFile - -If the script is present in the filesystem of a custom runner image, it can be accessed with the `localFile` option: - -```yaml -spec: - parallelism: 4 - script: - localFile: /test/test.js - runner: - image: -``` - -{{< admonition type="note" >}} - -If there is any limitation on the usage of `volumeClaim` in your cluster, you can use the `localFile` option. We recommend using `volumeClaim` if possible. - -{{< /admonition >}} - -### Multi-file tests - -In case your k6 script is split between multiple JavaScript files, you can create a `ConfigMap` with several data entries like this: - -```bash -kubectl create configmap scenarios-test --from-file test.js --from-file utils.js -``` - -If there are too many files to specify manually, using `kubectl` with a folder might be an option as well: - -```bash -kubectl create configmap scenarios-test --from-file=./test -``` - -Alternatively, you can create an archive with k6: - -```bash -k6 archive test.js [args] -``` - -The `k6 archive` command creates an `archive.tar` in your current folder. 
You can then use that file in the `configmap`, similarly to a JavaScript script: - -```bash -kubectl create configmap scenarios-test --from-file=archive.tar -``` - -If you use an archive, you must edit your YAML file for the `TestRun` deployment so that the `file` option is set to the correct entrypoint for the `k6 run` command: - -```yaml -# ... -spec: - script: - configMap: - name: 'scenarios-test' - file: 'archive.tar' # <-- change here -``` - -## Run tests - -Tests are executed by applying the custom resource `TestRun` to a cluster where the k6 Operator is running. Additional optional properties of the `TestRun` CRD allow you to control some key aspects of a distributed execution. For example: - -```yaml -# k6-resource.yml - -apiVersion: k6.io/v1alpha1 -kind: TestRun -metadata: - name: k6-sample -spec: - parallelism: 4 - script: - configMap: - name: k6-test - file: test.js - separate: false - runner: - image: - metadata: - labels: - cool-label: foo - annotations: - cool-annotation: bar - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - runAsNonRoot: true - resources: - limits: - cpu: 200m - memory: 1000Mi - requests: - cpu: 100m - memory: 500Mi - starter: - image: - metadata: - labels: - cool-label: foo - annotations: - cool-annotation: bar - securityContext: - runAsUser: 2000 - runAsGroup: 2000 - runAsNonRoot: true -``` - -A `TestRun` CR is created with this command: - -```bash -kubectl apply -f /path/to/your/k6-resource.yml -``` - -## Clean up resources - -After completing a test run, you need to clean up the test jobs that were created: - -```bash -kubectl delete -f /path/to/your/k6-resource.yml -``` - -Alternatively, you can configure the automatic deletion of all resources with the `cleanup` option: - -```yaml -spec: - cleanup: 'post' -``` - -With the `cleanup` option set, k6 Operator removes the `TestRun` CRD and all created resources once the test run ends. 
diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md deleted file mode 100644 index e654032755..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/extensions.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -weight: 200 -title: Use k6 Operator with k6 extensions ---- - -# Use k6 Operator with k6 extensions - -By default, the k6 Operator uses `ghcr.io/grafana/k6-operator:latest-runner` as the container image for the test jobs. - -If you want to use k6 [extensions](https://grafana.com/docs/k6//extensions/) built with [xk6](https://github.com/grafana/xk6), you'll need to create your own image and override the `image` property on the `TestRun` Kubernetes resource. - -For example, this is a `Dockerfile` that builds a k6 binary with the `xk6-output-influxdb` extension: - -```Dockerfile -# Build the k6 binary with the extension -FROM golang:1.20 as builder - -RUN go install go.k6.io/xk6/cmd/xk6@latest - -# For our example, we'll add support for output of test metrics to InfluxDB v2. -# Feel free to add other extensions using the '--with ...'. -RUN xk6 build \ - --with github.com/grafana/xk6-output-influxdb@latest \ - --output /k6 - -# Use the operator's base image and override the k6 binary -FROM grafana/k6:latest -COPY --from=builder /k6 /usr/bin/k6 -``` - -You can build the image based on this `Dockerfile` by executing: - -```bash -docker build -t k6-extended:local . -``` - -After the build completes, you can push the resulting `k6-extended:local` image to an image repository accessible to your Kubernetes cluster. 
- -You can then use that image as follows: - -```yaml -# k6-resource-with-extensions.yml - -apiVersion: k6.io/v1alpha1 -kind: TestRun -metadata: - name: k6-sample-with-extensions -spec: - parallelism: 4 - script: - configMap: - name: my-stress-test - file: test.js - runner: - image: k6-extended:local - env: - - name: K6_OUT - value: xk6-influxdb=http://influxdb.somewhere:8086/demo -``` - -Note that this example overrides the default image with `k6-extended:local`, and it includes environment variables that are required by the `xk6-output-influxdb` extension. diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md deleted file mode 100644 index 600decab73..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/k6-operator-to-gck6.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -weight: 250 -title: Use the k6 Operator with Grafana Cloud k6 ---- - -# Use the k6 Operator with Grafana Cloud k6 - -Grafana Cloud k6 is the Grafana Cloud offering of k6, which gives you access to all of k6 capabilities, while Grafana handles the infrastructure, storage, and metrics aggregation and insights from your tests. - -When using the k6 Operator, you can still leverage Grafana Cloud k6 to get access to the metric storage and analysis that the platform offers. - -There are two ways to use the k6 Operator with Grafana Cloud k6: Private Load Zones and Cloud output. - -## Before you begin - -To use the k6 Operator with Grafana Cloud k6, you’ll need: - -- A [Grafana Cloud account](https://grafana.com/auth/sign-up/create-user). - -## Private Load Zones - -Private Load Zones (PLZ) are load zones that you can host inside your network by using the k6 Operator. You can start a cloud test in a PLZ by referencing it by name from your script, and the test will run in the nodes of your Kubernetes cluster. 
- -Refer to [Set up private load zones](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/private-load-zone-v2/) for more details. - -## Cloud output - -With k6, you can send the [output from a test run to Grafana Cloud k6](https://grafana.com/docs/k6//results-output/real-time/cloud) with the `k6 run --out cloud script.js` command. This feature is also available in the k6 Operator if you have a Grafana Cloud account. - -{{< admonition type="note" >}} - -The cloud output option only supports a `parallelism` value of 20 or less. - -{{< /admonition >}} - -To use this option in k6 Operator, set the argument in YAML: - -```yaml -# ... -script: - configMap: - name: '' -arguments: --out cloud -# ... -``` - -Then, if you installed operator with bundle or Helm, create a secret with the following command: - -```bash -kubectl -n k6-operator-system create secret generic my-cloud-token \ - --from-literal=token= && kubectl -n k6-operator-system label secret my-cloud-token "k6cloud=token" -``` - -Alternatively, if you installed operator with a Makefile, you can uncomment the cloud output section in `config/default/kustomization.yaml` and copy your token from Grafana Cloud k6 there: - -```yaml -# Uncomment this section if you need cloud output and copy-paste your token -secretGenerator: - - name: cloud-token - literals: - - token= - options: - annotations: - kubernetes.io/service-account.name: k6-operator-controller - labels: - k6cloud: token -``` - -After updating the file, run `make deploy`. - -After these steps, you can run k6 with the cloud output and default values of `projectID` and `name`. - -Refer to [Cloud options](https://grafana.com/docs/grafana-cloud/testing/k6/author-run/cloud-scripting-extras/cloud-options/#cloud-options) for details on how to change the `projectID` and `name` options. 
diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md deleted file mode 100644 index f6f8b6d06f..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/reference.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -weight: 500 -title: Reference -_build: - list: false ---- - -# Reference - - - -{{< section depth=2 >}} diff --git a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md b/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md deleted file mode 100644 index 02fc7503a5..0000000000 --- a/docs/sources/v0.50.x/set-up/set-up-distributed-k6/usage/scheduling-tests.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -weight: 400 -title: Schedule k6 tests ---- - -# Schedule k6 tests - -While the k6 Operator doesn't support scheduling k6 tests directly, you can schedule tests with the `CronJob` object from Kubernetes directly. The `CronJob` would run on a schedule and execute the creation and deletion of the `TestRun` object. - -Running these tests requires a little more setup than a standalone test run. - -## Create a `ConfigMap` with k6 scripts - -Refer to [Run k6 scripts with `TestRun` CRD](https://grafana.com/docs/k6//set-up/set-up-distributed-k6/usage/executing-k6-scripts-with-testrun-crd/) for details on how to create a `ConfigMap` with k6 scripts. 
- -## Create a ConfigMap of the YAML file for the `TestRun` job - - - -When using the `make deploy` installation method, add a `configMapGenerator` to the `kustomization.yaml`: - -```yaml -configMapGenerator: - - name: -config - files: - - .yaml -``` - -## Create a `ServiceAccount` for the `CronJob` - -For the `CronJob` to be able to create and delete `TestRun` objects, create a service account: - -```yaml ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: k6- -rules: - - apiGroups: - - k6.io - resources: - - testruns - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: k6- -roleRef: - kind: Role - name: k6- - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: k6- - namespace: ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k6- -``` - -## Create a `CronJob` - -This is an example of how to define a `CronJob` in a YAML file: - -```yaml -# snapshotter.yml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: -cron -spec: - schedule: '' - concurrencyPolicy: Forbid - jobTemplate: - spec: - template: - spec: - serviceAccount: k6 - containers: - - name: kubectl - image: bitnami/kubectl - volumeMounts: - - name: k6-yaml - mountPath: /tmp/ - command: - - /bin/bash - args: - - -c - - 'kubectl delete -f /tmp/.yaml; kubectl apply -f /tmp/.yaml' - restartPolicy: OnFailure - volumes: - - name: k6-yaml - configMap: - name: -config -```