@@ -113,6 +117,7 @@
k0s bundles Kubernetes manifests for Calico. The manifests are retrieved
-from the official Calico docs.
+from the official Calico repo.
As fetching and modifying the entire multi-thousand-line file is error-prone, you may follow these steps to upgrade Calico to the latest version:
./get-calico.sh
./hack/get-calico.sh <version>
make bindata-manifests
The k0s containers are published both on Docker Hub and GitHub. For simplicity, the examples given here use Docker Hub (GitHub requires a separate authentication that is not covered). Alternative links include:
-Note: Due to Docker Hub tag validation scheme, we have to use - as the k0s version separator instead of the usual +. So for example k0s version v1.27.5+k0s.0 is tagged as docker.io/k0sproject/k0s:v1.27.5-k0s.0.
+Note: Due to Docker Hub tag validation scheme, we have to use - as the k0s version separator instead of the usual +. So for example k0s version v1.28.1+k0s.0 is tagged as docker.io/k0sproject/k0s:v1.28.1-k0s.0.
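For instance, a short illustration of pulling the image with the translated tag (the tag below simply applies the naming rule from the note and is an assumption, not an official instruction):
docker pull docker.io/k0sproject/k0s:v1.28.1-k0s.0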
You can run your own k0s in Docker:
-K0S_VERSION=v1.27.5+k0s.0
+K0S_VERSION=v1.28.1+k0s.0
DEBUG=true
Note: If you require environment variables and use sudo, you can do:
-curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.27.5+k0s.0 sh
+curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.28.1+k0s.0 sh
2. Bootstrap a controller node
Create a configuration file:
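As a minimal sketch of this step (the target path is an assumption; the configuration guide later in this document writes the file to /etc/k0s/k0s.yaml):
mkdir -p /etc/k0s
k0s config create > /etc/k0s/k0s.yaml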
@@ -1674,7 +2082,7 @@ 6. Check k0s status
sudo k0s status
-Version: v1.27.5+k0s.0
+Version: v1.28.1+k0s.0
Process ID: 2769
Parent Process ID: 1
Role: controller
@@ -1686,7 +2094,7 @@ 7. Access your cluster
sudo k0s kubectl get nodes
NAME STATUS ROLES AGE VERSION
-k0s Ready <none> 4m6s v1.27.5+k0s
+k0s Ready <none> 4m6s v1.28.1+k0s
You can also access your cluster easily with Lens, simply by copying the kubeconfig and pasting it to Lens:
sudo cat /var/lib/k0s/pki/admin.conf
@@ -1780,10 +2188,10 @@ Next Steps
spec.api.externalAddress.
-spec.api.tunneledNetworkingMode as true.
--single flag.
k0sctl
name: k0s-cluster
spec:
k0s:
- version: v1.27.5+k0s.0
+ version: v1.28.1+k0s.0
config:
spec:
network:
@@ -1633,11 +2038,11 @@ k0sctl
level=info msg="==> Running phase: Gather k0s facts"
level=info msg="==> Running phase: Validate facts"
level=info msg="==> Running phase: Upload k0s binaries to hosts"
-level=info msg="[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0"
-level=info msg="[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0"
-level=info msg="[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0"
-level=info msg="[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0"
-level=info msg="[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0"
+level=info msg="[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0"
+level=info msg="[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0"
+level=info msg="[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0"
+level=info msg="[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0"
+level=info msg="[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0"
level=info msg="==> Running phase: Configure k0s"
level=info msg="[ssh] 10.81.146.254:22: validating configuration"
level=info msg="[ssh] 10.81.146.184:22: validating configuration"
@@ -1677,7 +2082,7 @@ k0sctl
level=info msg="==> Running phase: Release exclusive host lock"
level=info msg="==> Running phase: Disconnect from hosts"
level=info msg="==> Finished in 3m30s"
-level=info msg="k0s cluster version v1.27.5+k0s.0 is now installed"
+level=info msg="k0s cluster version v1.28.1+k0s.0 is now installed"
level=info msg="Tip: To access the cluster you can now fetch the admin kubeconfig using:"
level=info msg=" k0sctl kubeconfig"
k0sctl
$ kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-k0s-worker-0 Ready <none> 2m16s v1.27.5+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
-k0s-worker-1 Ready <none> 2m15s v1.27.5+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
+k0s-worker-0 Ready <none> 2m16s v1.28.1+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
+k0s-worker-1 Ready <none> 2m15s v1.28.1+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
There is one node-local load balancer pod running for each worker node:
$ kubectl -n kube-system get pod -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb
@@ -1740,8 +2145,8 @@ Full example using k0sctl
$ kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-k0s-worker-0 Ready <none> 3m35s v1.27.5+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
-k0s-worker-1 Ready <none> 3m34s v1.27.5+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
+k0s-worker-0 Ready <none> 3m35s v1.28.1+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
+k0s-worker-1 Ready <none> 3m34s v1.28.1+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1
$ kubectl -n kube-system get pods -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
k0sctl
Download a k0s release. For example:
-wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-arm64 # replace version number!
+wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-arm64 # replace version number!
sudo install /tmp/k0s /usr/local/bin/k0s
― or ―
@@ -1725,7 +2133,7 @@ Download k0s
At this point you can run k0s
:
ubuntu@ubuntu:~$ k0s version
-v1.27.5+k0s.0
+v1.28.1+k0s.0
To check if k0s's system requirements and external runtime dependencies are fulfilled by your current
@@ -1737,6 +2145,7 @@
ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-ubuntu Ready control-plane 4m41s v1.27.5+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2
+ubuntu Ready control-plane 4m41s v1.28.1+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2
ubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system kube-proxy-kkv2l 1/1 Running 0 4m44s 10.152.56.54 ubuntu <none> <none>
@@ -1974,7 +2386,7 @@ As a worker node
ubuntu@ubuntu:~$ sudo k0s status
-Version: v1.27.5+k0s.0
+Version: v1.28.1+k0s.0
Process ID: 1631
Role: worker
Workloads: true
@@ -2016,7 +2428,7 @@ Connect to the cluster
Using the above kubeconfig, you can now access and use the cluster:
ubuntu@ubuntu:~$ KUBECONFIG=/path/to/kubeconfig kubectl get nodes,deployments,pods -owide -A
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-node/ubuntu Ready <none> 5m1s v1.27.5+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2
+node/ubuntu Ready <none> 5m1s v1.28.1+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
kube-system deployment.apps/coredns 1/1 1 1 33m coredns registry.k8s.io/coredns/coredns:v1.7.0 k8s-app=kube-dns
@@ -2109,10 +2521,10 @@ Connect to the cluster
The k0s version string consists of the Kubernetes version and the k0s version. For example:
-The Kubernetes version (1.27.5) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of the certain Kubernetes version.
+The Kubernetes version (1.28.1) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of that particular Kubernetes version.
@@ -1545,7 +1953,7 @@
k0s is an open source, all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster. Due to its simple design, flexible deployment options and modest system requirements, k0s is well suited for
k0s drastically reduces the complexity of installing and running a CNCF certified Kubernetes distribution. With k0s new clusters can be bootstrapped in minutes and developer friction is reduced to zero. This allows anyone with no special skills or expertise in Kubernetes to easily get started.
k0s is distributed as a single binary with zero host OS dependencies besides the host OS kernel. It works with any Linux without additional software packages or configuration. Any security vulnerabilities or performance issues can be fixed directly in the k0s distribution, which makes it extremely straightforward to keep the clusters up-to-date and secure.
"},{"location":"#what-happened-to-github-stargazers","title":"What happened to Github stargazers?","text":"In September 2022 we made a human error while creating some build automation scripts&tools for the Github repository. Our automation accidentally changed the repo to a private one for few minutes. That itself is not a big deal and everything was restored quickly. But the nasty side effect is that it also removed all the stargazers at that point. :(
Before that mishap we had 4776 stargazers, making k0s one of the most popular Kubernetes distros out there.
So if you are reading this and have not yet starred the k0s repo, we would highly appreciate the :star: to get our numbers closer to what they used to be.
"},{"location":"#key-features","title":"Key Features","text":"Quick Start Guide for creating a full Kubernetes cluster with a single node.
"},{"location":"#demo","title":"Demo","text":""},{"location":"#community-support","title":"Community Support","text":"We welcome your help in building k0s! If you are interested, we invite you to check out the Contributing Guide and the Code of Conduct.
"},{"location":"#commercial-support","title":"Commercial Support","text":"Mirantis offers technical support, professional services and training for k0s. The support subscriptions include, for example, prioritized support (Phone, Web, Email) and access to verified extensions on top of your k0s cluster.
For any k0s inquiries, please contact us via email info@k0sproject.io.
"},{"location":"CODE_OF_CONDUCT/","title":"k0s Community Code Of Conduct","text":"Please refer to our contributor code of conduct.
"},{"location":"FAQ/","title":"Frequently asked questions","text":""},{"location":"FAQ/#how-is-k0s-pronounced","title":"How is k0s pronounced?","text":"kay-zero-ess
"},{"location":"FAQ/#how-do-i-run-a-single-node-cluster","title":"How do I run a single node cluster?","text":"The cluster can be started with:
k0s controller --single\n
See also the Getting Started tutorial.
"},{"location":"FAQ/#how-do-i-connect-to-the-cluster","title":"How do I connect to the cluster?","text":"You find the config in ${DATADIR}/pki/admin.conf
(default: /var/lib/k0s/pki/admin.conf
). Copy this file, and change the localhost
entry to the public ip of the controller. Use the modified config to connect with kubectl:
export KUBECONFIG=/path/to/admin.conf\nkubectl ...\n
"},{"location":"FAQ/#why-doesnt-kubectl-get-nodes-list-the-k0s-controllers","title":"Why doesn't kubectl get nodes
list the k0s controllers?","text":"As a default, the control plane does not run kubelet at all, and will not accept any workloads, so the controller will not show up on the node list in kubectl. If you want your controller to accept workloads and run pods, you do so with: k0s controller --enable-worker
(recommended only as test/dev/POC environments).
Yes, k0sproject is 100% open source. The source code is under Apache 2 and the documentation is under the Creative Commons License. Mirantis, Inc. is the main contributor and sponsor for this OSS project: building all the binaries from upstream, performing necessary security scans and calculating checksums so that it's easy and safe to use. The use of these ready-made binaries are subject to Mirantis EULA and the binaries include only open source software.
"},{"location":"airgap-install/","title":"Airgap install","text":"You can install k0s in an environment with restricted Internet access. Airgap installation requires an image bundle, which contains all the needed container images. There are two options to get the image bundle:
In order to create your own image bundle, you need
ctr
, installed on the worker machine (refer to the ContainerD getting-started guide).k0s/containerd uses OCI (Open Container Initiative) bundles for airgap installation. OCI bundles must be uncompressed. As OCI bundles are built specifically for each architecture, create an OCI bundle that uses the same processor architecture (x86-64, ARM64, ARMv7) as on the target system.
k0s offers two methods for creating OCI bundles, one using Docker and the other using a previously set up k0s worker. Be aware, though, that you cannot use the Docker method for the ARM architectures due to kube-proxy image multiarch manifest problem.
Note: k0s strictly matches image architecture, e.g. arm/v7 images won't work for arm64.
"},{"location":"airgap-install/#docker","title":"Docker","text":"Pull the images.
k0s airgap list-images | xargs -I{} docker pull {}\n
Create a bundle.
docker image save $(k0s airgap list-images | xargs) -o bundle_file\n
As containerd pulls all the images during the k0s worker normal bootstrap, you can use it to build the OCI bundle with images.
Use the following commands on a machine with an installed k0s worker:
ctr --namespace k8s.io \\\n--address /run/k0s/containerd.sock \\\nimages export bundle_file $(k0s airgap list-images | xargs)\n
"},{"location":"airgap-install/#2a-sync-the-bundle-file-with-the-airgapped-machine-locally","title":"2a. Sync the bundle file with the airgapped machine (locally)","text":"Copy the bundle_file
you created in the previous step or downloaded from the releases page to the target machine into the images
directory in the k0s data directory. Copy the bundle only to the worker nodes. Controller nodes don't use it.
# mkdir -p /var/lib/k0s/images\n# cp bundle_file /var/lib/k0s/images/bundle_file\n
"},{"location":"airgap-install/#2b-sync-the-bundle-file-with-the-airgapped-machines-remotely-with-k0sctl","title":"2b. Sync the bundle file with the airgapped machines (remotely with k0sctl)","text":"As an alternative to the previous step, you can use k0sctl to upload the bundle file to the worker nodes. k0sctl can also be used to upload k0s binary file to all nodes. Take a look at this example (k0sctl.yaml) with one controller and one worker node to upload the bundle file and k0s binary:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: 1.27.5+k0s.0\nhosts:\n- role: controller\nssh:\naddress: <controller-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\n\n# uploadBinary: <boolean>\n# When true the k0s binaries are cached and uploaded\n# from the host running k0sctl instead of downloading\n# directly to the target host.\nuploadBinary: true\n\n# k0sBinaryPath: <local filepath>\n# Upload a custom or manually downloaded k0s binary\n# from a local path on the host running k0sctl to the\n# target host.\n# k0sBinaryPath: path/to/k0s_binary/k0s\n\n- role: worker\nssh:\naddress: <worker-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\nuploadBinary: true\nfiles:\n# This airgap bundle file will be uploaded from the k0sctl\n# host to the specified directory on the target host\n- src: /local/path/to/bundle-file/airgap-bundle-amd64.tar\ndstDir: /var/lib/k0s/images/\nperm: 0755\n
"},{"location":"airgap-install/#3-ensure-pull-policy-in-the-k0syaml-optional","title":"3. Ensure pull policy in the k0s.yaml (optional)","text":"Use the following k0s.yaml
to ensure that containerd does not pull images for k0s components from the Internet at any time.
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nimages:\ndefault_pull_policy: Never\n
"},{"location":"airgap-install/#4-set-up-the-controller-and-worker-nodes","title":"4. Set up the controller and worker nodes","text":"Refer to the Manual Install for information on setting up the controller and worker nodes locally. Alternatively, you can use k0sctl.
Note: During the worker start up k0s imports all bundles from the $K0S_DATA_DIR/images
before starting kubelet
.
Note: As k0s is a new and dynamic project, the product architecture may occasionally outpace the documentation. The high level concepts and patterns, however, should always apply.
"},{"location":"architecture/#packaging","title":"Packaging","text":"The k0s package is a single, self-extracting binary that embeds Kubernetes binaries, the benefits of which include:
As a single binary, k0s acts as the process supervisor for all other control plane components. As such, there is no container engine or kubelet running on controllers by default, which thus means that a cluster user cannot schedule workloads onto controller nodes.
Using k0s you can create, manage, and configure each of the components, running each as a \"naked\" process. Thus, there is no container engine running on the controller node.
"},{"location":"architecture/#storage","title":"Storage","text":"Kubernetes control plane typically supports only etcd as the datastore. k0s, however, supports many other datastore options in addition to etcd, which it achieves by including kine. Kine allows the use of a wide variety of backend data stores, such as MySQL, PostgreSQL, SQLite, and dqlite (refer to the spec.storage
documentation).
In the case of k0s managed etcd, k0s manages the full lifecycle of the etcd cluster. For example, by joining a new controller node with k0s controller \"long-join-token\"
k0s atomatically adjusts the etcd cluster membership info to allow the new member to join the cluster.
Note: k0s cannot shrink the etcd cluster. As such, to shut down the k0s controller on a node that node must first be manually removed from the etcd cluster.
"},{"location":"architecture/#worker-node","title":"Worker node","text":"As with the control plane, with k0s you can create and manage the core worker components as naked processes on the worker node.
By default, k0s workers use containerd as a high-level runtime and runc as a low-level runtime. Custom runtimes are also supported, refer to Using custom CRI runtime.
"},{"location":"autopilot-multicommand/","title":"Multi-Command Plans","text":"Autopilot relies on a Plan for defining the Commands that should be executed, the Signal Nodes that each should be run on, and the status of each Command.
A Plan:
A Command:
A Signal Node:
The execution of a Plan is the result of processing Commands through a number of Processing States.
When a Plan is executed, each of the Commands are executed in the order of their appearance in the Plan.
The progress and state of each Command is recorded in the Plan status.
1
, and so does its status.The following is an example of a Plan that has been applied as is currently being processed by autopilot.
(line numbers added for commentary below)
1: apiVersion: autopilot.k0sproject.io/v1beta2\n2: kind: Plan\n3: metadata:\n4: annotations:\n5: <omitted>\n6: spec:\n7: commands:\n8: - airgapupdate:\n9: version: v1.27.5+k0s.0\n10: platforms:\n11: linux-amd64:\n12: url: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-airgap-bundle-v1.27.5+k0s.0-amd64\n13: workers:\n14: discovery:\n15: static:\n16: nodes:\n17: - worker0\n18: - k0supdate:\n19: version: v1.27.5+k0s.0\n20: platforms:\n21: linux-amd64:\n22: url: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-amd64\n23: targets:\n24: controllers:\n25: discovery:\n26: static:\n27: nodes:\n28: - controller0\n29: workers:\n30: discovery:\n31: static:\n32: nodes:\n33: - worker0\n34: id: id123\n35: timestamp: now\n36: status:\n37: commands:\n38: - airgapupdate:\n39: workers:\n40: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n41: name: worker0\n42: state: SignalSent\n43: id: 0\n44: state: SchedulableWait\n45: - id: 1\n46: k0supdate:\n47: controllers:\n48: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n49: name: controller0\n50: state: SignalPending\n51: workers:\n52: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n53: name: worker0\n54: state: SignalPending\n55: state: SchedulableWait\n56: state: SchedulableWait\n
airgapupdate
and k0supdate
.The state of this Plan exerpt is that autopilot has successfully processed the Plan, and has begun processing the airgapupdate
Command. Its status indicates SignalSent which means that the Signal Node has been sent signaling information to perform an airgap update.
The following are the various states that both Plan
s and Command
s adhere to.
stateDiagram-v2\n [*]-->NewPlan\n NewPlan-->SchedulableWait\n NewPlan-->Errors***\n\n SchedulableWait-->Schedulable\n SchedulableWait-->Completed\n Schedulable-->SchedulableWait\n\n Errors***-->[*]\n Completed-->[*]
Note that the Errors state is elaborated in detail below in Error States*.
"},{"location":"autopilot-multicommand/#newplan","title":"NewPlan","text":"When a Plan is created with the name autopilot
, the NewPlan state processing takes effect.
It is the responsibility of NewPlan to ensure that the status of all the Commands are represented in the Plan status. This Plan status is needed at later points in Plan processing to determine if the entire Plan is completed.
The main difference between NewPlan and all the other states is that NewPlan will iterate over all commands; the other states deal with the active command.
"},{"location":"autopilot-multicommand/#schedulablewait","title":"SchedulableWait","text":"Used to evaluate a Command to determine if it can be scheduled for processing. If the Command is determined that it can be processed, the state is set to Schedulable.
"},{"location":"autopilot-multicommand/#schedulable","title":"Schedulable","text":"The Schedulable state is set by SchedulableWait to indicate that this command should execute. The execution of a Command in this state will be whichever logic is defined by the Command.
The ending of this state should either transition to SchedulableWait for further processing + completion detection, or transition to an error.
"},{"location":"autopilot-multicommand/#completed","title":"Completed","text":"The Completed state indicates that the command has finished processing. Once a plan/command are in the Completed state, no further processing will occur on this plan/command.
"},{"location":"autopilot-multicommand/#error-states","title":"Error States","text":"When a plan or command processing goes into one of the designated error states, this is considered fatal and the plan/command processing will terminate.
Error states are generally defined by the Command implementation. The core autopilot functionality is only interested when in the 4 core states (NewPlan, SchedulableWait, Schedulable, Completed), and treats all other states as an error.
flowchart TD\n Errors --> InconsistentTargets\n Errors --> IncompleteTargets\n Errors --> Restricted\n Errors --> MissingPlatform\n Errors --> MissingSignalNode
Error State Command States Description InconsistentTargets k0supdate
Schedulable Indicates that a Signal Node probe has failed for any node that was previously discovered during NewPlan. IncompleteTargets airgapupdate
, k0supdate
NewPlan, Schedulable Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no ControlNode
or Node
object) Restricted airgapupdate
, k0supdate
NewPlan Indicates that a Plan has requested an update of a Signal Node type that contradicts the startup exclusions (the --exclude-from-plans
argument) MissingSignalNode airgapupdate
, k0supdate
Schedulable Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no matching ControlNode
or Node
object)"},{"location":"autopilot-multicommand/#sequence-example","title":"Sequence: Example","text":"Using the example above as a reference, this outlines the basic sequence of events of state transitions to the operations performed on each object.
sequenceDiagram\n PlanStateHandler->>+AirgapUpdateCommand: State: NewPlan\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.NewPlan() -- >SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: NewPlan\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.NewPlan() --> SchedulableWait\n Note over PlanStateHandler,SignalNode(worker0): NewPlan Finished / All Commands\n\n PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+AirgapUpdateCommand: State: Schedulable\n AirgapUpdateCommand->>-SignalNode(worker0): signal_v2(airgap-data) --> SchedulableWait\n PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(worker0): AirgapUpdate Finished / worker0\n\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n K0sUpdateCommand->>-SignalNode(controller0): signal_v2(k0s-data) --> SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(controller0): K0sUpdate Finished / controller0\n\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n K0sUpdateCommand->>-SignalNode(worker0): signal_v2(k0s-data) --> SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(worker0): K0sUpdate Finished / worker0\n\n PlanStateHandler->>PlanStateHandler: Completed
"},{"location":"autopilot/","title":"Autopilot","text":"A tool for updating your k0s
controller and worker nodes using specialized plans. There is a public update-server hosted on the same domain as the documentation site. See the example below on how to use it. There is only a single channel edge_release
available. The channel exposes the latest released version.
Plan
YAMLk0s
, URLs for platforms, etc)Plan
Plan
is a simple kubectl apply
operation.Plan
provides a status that details the progress.To enable automatic updates, create an UpdateConfig
object:
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nnamespace: default\nspec:\nchannel: edge_release\nupdateServer: https://docs.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n
"},{"location":"autopilot/#safeguards","title":"Safeguards","text":"There are a number of safeguards in place to avoid breaking a cluster.
"},{"location":"autopilot/#stateless-component","title":"Stateless Component","text":"Plan
is applied that has both controller and worker nodes, all of the controller nodes will be updated first. It is only when all controllers have updated successfully that worker nodes will receive their update instructions.Plan
, autopilot evaluates all of the controllers and workers that should be included into the Plan
, and tracks them in the status. After this point, no additional changes to the plan (other than status) will be recognized.selector
discovery method no longer exist by the time the update is ready to be scheduled./ready
/ready
will the current controller get sent update signaling.Plan
transitions into an InconsistentTargets
state, and the Plan
execution ends.update
object payload can provide an optional sha256
hash of the update content (specified in url
), which is compared against the update content after it downloads.Autopilot relies on a Plan
object on its instructions on what to update.
Here is an arbitrary Autopilot plan:
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: Plan\nmetadata:\nname: autopilot\n\nspec:\nid: id1234\ntimestamp: now\n\ncommands:\n- k0supdate:\nversion: v1.27.5+k0s.0\nplatforms:\nlinux-amd64:\nurl: https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-amd64\nsha256: '0000000000000000000000000000000000000000000000000000000000000000'\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#core-fields","title":"Core Fields","text":""},{"location":"autopilot/#apiversion-string-required","title":"apiVersion <string> (required)
","text":"v1beta2
, with a full group-version of autopilot.k0sproject.io/v1beta2
metadata.name <string> (required)
","text":"autopilot
spec.id <string> (optional)
","text":"spec.timestamp <string> (optional)
","text":"spec.commands[] (required)
","text":"commands
contains all of the commands that should be performed as a part of the plan.k0supdate
Command","text":""},{"location":"autopilot/#speccommandsk0supdateversion-string-required","title":"spec.commands[].k0supdate.version <string> (required)
","text":"spec.commands[].k0supdate.platforms.*.url <string> (required)
","text":"$GOOS
and $GOARCH
, separated by a hyphen (-
)linux-amd64
, linux-arm64
, linux-arm
linux
. Autopilot may work on other platforms, however this has not been tested.spec.commands[].k0supdate.platforms.*.sha256 <string> (optional)
","text":"spec.commands[].k0supdate.targets.controllers <object> (optional)
","text":"controllers
should be updated.spec.commands[].k0supdate.targets.controllers.limits.concurrent <int> (fixed as 1)
","text":"1
.spec.commands[].k0supdate.targets.workers <object> (optional)
","text":"workers
should be updated.spec.commands[].k0supdate.targets.workers.limits.concurrent <int> (optional, default = 1)
","text":"concurrent
value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1
is assumed.airgapupdate
Command","text":""},{"location":"autopilot/#speccommandsairgapupdateversion-string-required","title":"spec.commands[].airgapupdate.version <string> (required)
","text":"spec.commands[].airgapupdate.platforms.*.url <string> (required)
","text":"$GOOS
and $GOARCH
, separated by a hyphen (-
)linux-amd64
, linux-arm64
, linux-arm
linux
. Autopilot may work on other platforms, however this has not been tested.spec.commands[].airgapupdate.platforms.*.sha256 <string> (optional)
","text":"spec.commands[].airgapupdate.targets.workers <object> (optional)
","text":"workers
should be updated.spec.commands[].airgapupdate.targets.workers.limits.concurrent <int> (optional, default = 1)
","text":"concurrent
value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1
is assumed.This defines the static
discovery method used for this set of targets (controllers
, workers
). The static
discovery method relies on a fixed set of hostnames defined in .nodes
.
It is expected that a Node
(workers) or ControlNode
(controllers) object exists with the same name.
static:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoverystaticnodes-string-required-for-static","title":"spec.commands[].k0supdate.targets.*.discovery.static.nodes[] <string> (required for static)
","text":"controllers
, workers
).The selector
target discovery method relies on a dynamic query to the Kubernetes API using labels and fields to produce a set of hosts that should be updated.
Providing both labels
and fields
in the selector
definition will result in a logical AND
of both operands.
selector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
Specifying an empty selector will result in all nodes being selected for this target set.
selector: {}\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorlabels-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.labels <string> (optional)
","text":"spec.commands[].k0supdate.targets.*.discovery.selector.fields <string> (optional)
","text":"metadata.name
is available as a query field.After a Plan
has been applied, its progress can be viewed in the .status
of the autopilot
Plan.
kubectl get plan autopilot -oyaml\n
An example of a Plan
status:
status:\nstate: SchedulableWait\ncommands:\n- state: SchedulableWait\nk0supdate:\ncontrollers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:44Z\"\nname: controller0\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller1\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller2\nstate: SignalPending\nworkers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker0\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker1\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker2\nstate: SignalPending\n
To read this status, this indicates that:
SchedulableWait
, meaning that autopilot is waiting for the next opportunity to process a command.SignalCompleted
successfullySignalPending
)SignalPending
)The Plan
status at .status.status
represents the overall status of the autopilot update operation. There are a number of statuses available:
IncompleteTargets
There are nodes in the resolved Plan
that do not have associated Node
(worker) or ControlNode
(controller) objects. Yes InconsistentTargets
A controller has reported itself as not-ready during the selection of the next controller to update. Yes Schedulable
Indicates that the Plan
can be re-evaluated to determine which next node to update. No SchedulableWait
Scheduling operations are in progress, and no further update scheduling should occur. No Completed
The Plan
has run successfully to completion. Yes Restricted
The Plan
included node types (controller or worker) that violates the --exclude-from-plans
restrictions. Yes"},{"location":"autopilot/#node-status","title":"Node Status","text":"Similar to the Plan Status, the individual nodes can have their own statuses:
Status DescriptionSignalPending
The node is available and awaiting an update signal SignalSent
Update signaling has been successfully applied to this node. MissingPlatform
This node is a platform that an update has not been provided for. MissingSignalNode
This node does have an associated Node
(worker) or ControlNode
(controller) object."},{"location":"autopilot/#updateconfig","title":"UpdateConfig","text":""},{"location":"autopilot/#updateconfig-core-fields","title":"UpdateConfig Core Fields","text":""},{"location":"autopilot/#apiversion-string-required-field","title":"apiVersion <string> (required field)
","text":"v1beta2
, with a full group-version of autopilot.k0sproject.io/v1beta2
metadata.name <string> (required field)
","text":"spec.channel <string> (optional)
","text":"stable
(default), unstable
.spec.updateServer <string> (optional)
","text":"spec.upgradeStrategy.cron <string> (optional)
","text":"spec.planSpec <string> (optional)
","text":"Plan
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdaterConfig\nmetadata:\nname: example\nspec:\nchannel: stable\nupdateServer: https://updates.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n# Optional. Specifies a created Plan object\nplanSpec:\ncommands:\n- k0supdate: # optional\nforceupdate: true # optional\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\nairgapupdate: # optional\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#faq","title":"FAQ","text":""},{"location":"autopilot/#q-how-do-i-apply-the-plan-and-controlnode-crds","title":"Q: How do I apply the Plan
and ControlNode
CRDs?","text":"A: These CRD definitions are embedded in the autopilot binary and applied on startup. No additional action is needed.
"},{"location":"autopilot/#q-how-will-controlnode-instances-get-removed","title":"Q: How willControlNode
instances get removed?","text":"A: ControlNode
instances are created by autopilot controllers as they startup. When controllers disappear, they will not remove their associated ControlNode
instance. It is the responsibility of the operator/administrator to ensure their maintenance.
You probably upgraded your workers to an API version greater than what is available on the API server.
https://kubernetes.io/releases/version-skew-policy/
Make sure that your controllers are at the desired version first before upgrading workers.
"},{"location":"backup/","title":"Backup/Restore overview","text":"k0s has integrated support for backing up cluster state and configuration. The k0s backup utility is aiming to back up and restore k0s managed parts of the cluster.
The backups created by k0s backup
command have following pieces of your cluster:
<data-dir>/pki
directory)<data-dir>/manifests
<data-dir>/images
Parts NOT covered by the backup utility:
<data-dir>/manifests
)Any of the backup/restore related operations MUST be performed on the controller node.
"},{"location":"backup/#backuprestore-a-k0s-node-locally","title":"Backup/restore a k0s node locally","text":""},{"location":"backup/#backup-local","title":"Backup (local)","text":"To create backup run the following command on the controller node:
k0s backup --save-path=<directory>\n
The directory used for the save-path
value must exist and be writable. The default value is the current working directory. The command provides backup archive using following naming convention: k0s_backup_<ISODatetimeString>.tar.gz
Because of the DateTime usage, it is guaranteed that none of the previously created archives would be overwritten.
To output the backup archive to stdout, use -
as the save path.
To restore cluster state from the archive use the following command on the controller node:
k0s restore /tmp/k0s_backup_2021-04-26T19_51_57_000Z.tar.gz\n
The command would fail if the data directory for the current controller has overlapping data with the backup archive content.
The command would use the archived k0s.yaml
as the cluster configuration description.
In case if your cluster is HA, after restoring single controller node, join the rest of the controller nodes to the cluster. E.g. steps for N nodes cluster would be:
To read the backup archive from stdin, use -
as the file path.
By using -
as the save or restore path, it is possible to pipe the backup archive through an encryption utility such as GnuPG or OpenSSL.
Note that unencrypted data will still briefly exist as temporary files on the local file system during the backup archvive generation.
"},{"location":"backup/#encrypting-backups-using-gnupg","title":"Encrypting backups using GnuPG","text":"Follow the instructions for your operating system to install the gpg
command if it is not already installed.
This tutorial only covers the bare minimum for example purposes. For secure key management practices and advanced usage refer to the GnuPG user manual.
To generate a new key-pair, use:
gpg --gen-key\n
The key will be stored in your key ring.
gpg --list-keys\n
This will output a list of keys:
/home/user/.gnupg/pubring.gpg\n------------------------------\npub 4096R/BD33228F 2022-01-13\nuid Example User <user@example.com>\nsub 4096R/2F78C251 2022-01-13\n
To export the private key for decrypting the backup on another host, note the key ID (\"BD33228F\" in this example) in the list and use:
gpg --export-secret-keys --armor BD33228F > k0s.key\n
To create an encrypted k0s backup:
k0s backup --save-path - | gpg --encrypt --recipient user@example.com > backup.tar.gz.gpg\n
"},{"location":"backup/#restoring-encrypted-backups-using-gnupg","title":"Restoring encrypted backups using GnuPG","text":"You must have the private key in your gpg keychain. To import the key that was exported in the previous example, use:
gpg --import k0s.key\n
To restore the encrypted backup, use:
gpg --decrypt backup.tar.gz.gpg | k0s restore -\n
"},{"location":"backup/#backuprestore-a-k0s-cluster-using-k0sctl","title":"Backup/restore a k0s cluster using k0sctl","text":"With k0sctl you can perform cluster level backup and restore remotely with one command.
"},{"location":"backup/#backup-remote","title":"Backup (remote)","text":"To create backup run the following command:
k0sctl backup\n
k0sctl connects to the cluster nodes to create a backup. The backup file is stored in the current working directory.
"},{"location":"backup/#restore-remote","title":"Restore (remote)","text":"To restore cluster state from the archive use the following command:
k0sctl apply --restore-from /path/to/backup_file.tar.gz\n
The control plane load balancer address (externalAddress) needs to remain the same between backup and restore. This is caused by the fact that all worker node components connect to this address and cannot currently be re-configured.
"},{"location":"cis_benchmark/","title":"Kube-bench Security Benchmark","text":"Kube-bench is an open source tool which can be used to verify security best practices as defined in CIS Kubernetes Benchmark. It provides a number of tests to help harden your k0s clusters. By default, k0s will pass Kube-bench benchmarks with some exceptions, which are shown below.
"},{"location":"cis_benchmark/#run","title":"Run","text":"Follow the Kube-bench quick start instructions.
After installing the Kube-bench on the host that is running k0s
cluster run the following command:
kube-bench run --config-dir docs/kube-bench/cfg/ --benchmark k0s-1.0\n
"},{"location":"cis_benchmark/#summary-of-disabled-checks","title":"Summary of disabled checks","text":""},{"location":"cis_benchmark/#master-node-security-configuration","title":"Master Node Security Configuration","text":"The current configuration has in total 8 master checks disabled:
id: 1.2.10 - EventRateLimit requires external yaml config. It is left for the users to configure it
type: skip\ntext: \"Ensure that the admission control plugin EventRateLimit is set (Manual)\"\n
id: 1.2.12 - By default this isn't passed to the apiserver for air-gap functionality
type: skip\ntext: \"Ensure that the admission control plugin AlwaysPullImages is set (Manual)\"\n
id: 1.2.22 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-path argument is set (Automated)\"\n
id: 1.2.23 - For sake of simplicity of k0s all audit configuration are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)\"\n
id: 1.2.24 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)\"\n
id: 1.2.25 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)\"\n
id: 1.2.33 - By default it is not enabled. Left for the users to decide
type: skip\ntext: \"Ensure that the --encryption-provider-config argument is set as appropriate (Manual)\"\n
id: 1.2.34 - By default it is not enabled. Left for the users to decide
type: skip\ntext: \"Ensure that encryption providers are appropriately configured (Manual)\"\n
and 4 node checks disabled:
id: 4.1.1 - not applicable since k0s does not use kubelet service file
type: skip\ntext: \"Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)\"\n
id: 4.1.2 - not applicable since k0s does not use kubelet service file
type: skip\ntext: \"Ensure that the kubelet service file ownership is set to root:root (Automated)\"\n
id: 4.2.6 - k0s does not set this. See https://github.com/kubernetes/kubernetes/issues/66693
type: skip\ntext: \"Ensure that the --protect-kernel-defaults argument is set to true (Automated)\"\n
id: 4.2.10 - k0s doesn't set this up because certs get auto rotated
type: skip\ntext: \"Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)\"\n
3 checks for the control plane:
id: 3.1.1 - For purpose of being fully automated k0s is skipping this check
type: skip\ntext: \"Client certificate authentication should not be used for users (Manual)\"\n
id: 3.2.1 - out-of-the box configuration does not have any audit policy configuration but users can customize it in spec.api.extraArgs section of the config
type: skip\ntext: \"Ensure that a minimal audit policy is created (Manual)\"\n
id: 3.2.2 - Same as previous
type: skip\ntext: \"Ensure that the audit policy covers key security concerns (Manual)\"\n
Policy checks are also disabled. The checks are manual and are up to the end user to decide on them.
"},{"location":"cloud-providers/","title":"Cloud providers","text":"k0s builds Kubernetes components in providerless mode, meaning that cloud providers are not built into k0s-managed Kubernetes components. As such, you must externally configure the cloud providers to enable their support in your k0s cluster (for more information on running Kubernetes with cloud providers, refer to the Kubernetes documentation.
"},{"location":"cloud-providers/#external-cloud-providers","title":"External Cloud Providers","text":""},{"location":"cloud-providers/#enable-cloud-provider-support-in-kubelet","title":"Enable cloud provider support in kubelet","text":"Even when all components are built with providerless mode, you must be able to enable cloud provider mode for kubelet. To do this, run the workers with --enable-cloud-provider=true
.
When deploying with k0sctl, you can add this into the installFlags
of worker hosts.
spec:\nhosts:\n- ssh:\naddress: 10.0.0.1\nuser: root\nkeyPath: ~/.ssh/id_rsa\ninstallFlags:\n- --enable-cloud-provider\n- --kubelet-extra-args=\"--cloud-provider=external\"\nrole: worker\n
"},{"location":"cloud-providers/#deploy-the-cloud-provider","title":"Deploy the cloud provider","text":"The easiest way to deploy cloud provider controllers is on the k0s cluster.
Use the built-in manifest deployer built into k0s to deploy your cloud provider as a k0s-managed stack. Next, just drop all required manifests into the /var/lib/k0s/manifests/aws/
directory, and k0s will handle the deployment.
Note: The prerequisites for the various cloud providers can vary (for example, several require that configuration files be present on all of the nodes). Refer to your chosen cloud provider's documentation as necessary.
"},{"location":"cloud-providers/#k0s-cloud-provider","title":"k0s Cloud Provider","text":"Alternatively, k0s provides its own lightweight cloud provider that can be used to statically assign ExternalIP
values to worker nodes via Kubernetes annotations. This is beneficial for those who need to expose worker nodes externally via static IP assignments.
To enable this functionality, add the parameter --enable-k0s-cloud-provider=true
to all controllers, and --enable-cloud-provider=true
to all workers.
Adding a static IP address to a node using kubectl
:
kubectl annotate \\\nnode <node> \\\nk0sproject.io/node-ip-external=<external IP>\n
Both IPv4 and IPv6 addresses are supported.
"},{"location":"cloud-providers/#defaults","title":"Defaults","text":"The default node refresh interval is 2m
, which can be overridden using the --k0s-cloud-provider-update-frequency=<duration>
parameter when launching the controller(s).
The default port that the cloud provider binds to can be overridden using the --k0s-cloud-provider-port=<int>
parameter when launching the controller(s).
Commercial support for k0s if offered by Mirantis Inc..
Mirantis can provide various different levels of support starting from DevCare (9-to-5) all the way to OpsCare+ with fully managed service.
On top of our normal release and support model our commercial customers have access to critical security patches even for released versions that fall outside of the Open Source maintained releases.1 Commercial support also includes support for k0s related tooling such as k0sctl.
If you are interested in commercial support for k0s check out our support description and please contact us for further details.
This is assuming there is a compatible release of upstream project with the fix\u00a0\u21a9
k0s command-line interface has the ability to validate config syntax:
k0s validate config --config path/to/config/file\n
validate config
sub-command can validate the following:
k0s can be installed without a config file. In that case the default configuration will be used. You can, though, create and run your own non-default configuration (used by the k0s controller nodes).
k0s supports providing only partial configurations. In case of partial configuration is provided, k0s will use the defaults for any missing values.
Generate a yaml config file that uses the default settings.
mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
Modify the new yaml config file according to your needs, refer to Configuration file reference below. You can remove the default values if wanted as k0s supports partial configs too.
Install k0s with your new config file.
sudo k0s install controller -c /etc/k0s/k0s.yaml\n
If you need to modify your existing configuration later on, you can change your config file also when k0s is running, but remember to restart k0s to apply your configuration changes.
sudo k0s stop\nsudo k0s start\n
k0sctl can deploy your configuration options at cluster creation time. Your options should be placed in the spec.k0s.config
section of the k0sctl's configuration file. See the section on how to install k0s via k0sctl and the k0sctl README for more information.
CAUTION: As many of the available options affect items deep in the stack, you should fully understand the correlation between the configuration file components and your specific environment before making any changes.
A YAML config file follows, with defaults as generated by the k0s config create
command:
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\napi:\naddress: 192.168.68.104\nexternalAddress: my-lb-address.example.com\nk0sApiPort: 9443\nport: 6443\nsans:\n- 192.168.68.104\ntunneledNetworkingMode: false\ncontrollerManager: {}\nextensions:\nhelm:\nconcurrencyLevel: 5\ncharts: null\nrepositories: null\nstorage:\ncreate_default_storage_class: false\ntype: external_storage\ninstallConfig:\nusers:\netcdUser: etcd\nkineUser: kube-apiserver\nkonnectivityUser: konnectivity-server\nkubeAPIserverUser: kube-apiserver\nkubeSchedulerUser: kube-scheduler\nkonnectivity:\nadminPort: 8133\nagentPort: 8132\nnetwork:\ncalico: null\nclusterDomain: cluster.local\ndualStack: {}\nkubeProxy:\nmetricsBindAddress: 0.0.0.0:10249\nmode: iptables\nkuberouter:\nautoMTU: true\nhairpin: Enabled\nipMasq: false\nmetricsPort: 8080\nmtu: 0\npeerRouterASNs: \"\"\npeerRouterIPs: \"\"\nnodeLocalLoadBalancing:\nenabled: false\nenvoyProxy:\napiServerBindPort: 7443\nimage:\nimage: docker.io/envoyproxy/envoy-distroless\nversion: v1.24.1\nkonnectivityServerBindPort: 7132\ntype: EnvoyProxy\npodCIDR: 10.244.0.0/16\nprovider: kuberouter\nserviceCIDR: 10.96.0.0/12\nscheduler: {}\nstorage:\netcd:\nexternalCluster: null\npeerAddress: 192.168.68.104\ntype: etcd\ntelemetry:\nenabled: true\nfeatureGates:\n- name: feature_XXX\nenabled: true\ncomponents: [\"kubelet\", \"kube-api\", \"kube-scheduler\"]\n- name: feature_YYY\nenabled: true\n-\nname: feature_ZZZ\nenabled: false\n
"},{"location":"configuration/#spec-key-detail","title":"spec
Key Detail","text":""},{"location":"configuration/#specapi","title":"spec.api
","text":"Element Description externalAddress
The loadbalancer address (for k0s controllers running behind a loadbalancer). Configures all cluster components to connect to this address and also configures this address for use when joining new nodes to the cluster. address
Local address on which to bind an API. Also serves as one of the addresses pushed on the k0s create service certificate on the API. Defaults to first non-local address found on the node. sans
List of additional addresses to push to API servers serving the certificate. extraArgs
Map of key-values (strings) for any extra arguments to pass down to Kubernetes api-server process. port
\u00b9 Custom port for kube-api server to listen on (default: 6443) k0sApiPort
\u00b9 Custom port for k0s-api server to listen on (default: 9443) tunneledNetworkingMode
Whether to tunnel Kubernetes access from worker nodes via local port forwarding. (default: false
) \u00b9 If port
and k0sApiPort
are used with the externalAddress
element, the loadbalancer serving at externalAddress
must listen on the same ports.
spec.storage
","text":"Element Description type
Type of the data store (valid values:etcd
or kine
). Note: Type etcd
will cause k0s to create and manage an elastic etcd cluster within the controller nodes. etcd.peerAddress
Node address used for etcd cluster peering. etcd.extraArgs
Map of key-values (strings) for any extra arguments to pass down to etcd process. kine.dataSource
kine datasource URL."},{"location":"configuration/#specnetwork","title":"spec.network
","text":"Element Description provider
Network provider (valid values: calico
, kuberouter
, or custom
). For custom
, you can push any network provider (default: kuberouter
). Be aware that it is your responsibility to configure all of the CNI-related setups, including the CNI provider itself and all necessary host levels setups (for example, CNI binaries). Note: Once you initialize the cluster with a network provider the only way to change providers is through a full cluster redeployment. podCIDR
Pod network CIDR to use in the cluster. serviceCIDR
Network CIDR to use for cluster VIP services. clusterDomain
Cluster Domain to be passed to the kubelet and the coredns configuration."},{"location":"configuration/#specnetworkcalico","title":"spec.network.calico
","text":"Element Description mode
vxlan
(default), ipip
or bird
overlay
Overlay mode: Always
(default), CrossSubnet
or Never
(requires mode=vxlan
to disable calico overlay-network). vxlanPort
The UDP port for VXLAN (default: 4789
). vxlanVNI
The virtual network ID for VXLAN (default: 4096
). mtu
MTU for overlay network (default: 0
, which causes Calico to detect optimal MTU during bootstrap). wireguard
Enable wireguard-based encryption (default: false
). Your host system must be wireguard ready (refer to the Calico documentation for details). flexVolumeDriverPath
The host path for Calicos flex-volume-driver(default: /usr/libexec/k0s/kubelet-plugins/volume/exec/nodeagent~uds
). Change this path only if the default path is unwriteable (refer to Project Calico Issue #2712 for details). Ideally, you will pair this option with a custom volumePluginDir
in the profile you use for your worker nodes. ipAutodetectionMethod
Use to force Calico to pick up the interface for pod network inter-node routing (default: \"\"
, meaning not set, so that Calico will instead use its defaults). For more information, refer to the Calico documentation. envVars
Map of key-values (strings) for any calico-node environment variable."},{"location":"configuration/#specnetworkcalicoenvvars","title":"spec.network.calico.envVars
","text":"Environment variable's value must be string, e.g.:
spec:\nnetwork:\nprovider: calico\ncalico:\nenvVars:\nTEST_BOOL_VAR: \"true\"\nTEST_INT_VAR: \"42\"\nTEST_STRING_VAR: test\n
K0s runs Calico with some predefined environment variables, which can be overwritten by setting a new value in spec.network.calico.envVars
:
CALICO_IPV4POOL_CIDR: \"{{ spec.network.podCIDR }}\"\nCALICO_DISABLE_FILE_LOGGING: \"true\"\nFELIX_DEFAULTENDPOINTTOHOSTACTION: \"ACCEPT\"\nFELIX_LOGSEVERITYSCREEN: \"info\"\nFELIX_HEALTHENABLED: \"true\"\nFELIX_PROMETHEUSMETRICSENABLED: \"true\"\nFELIX_FEATUREDETECTOVERRIDE: \"ChecksumOffloadBroken=true\"\n
FELIX_FEATUREDETECTOVERRIDE: ChecksumOffloadBroken=true
disables VXLAN offloading because of projectcalico/calico#4727.
In SingleStack mode there are additional vars:
FELIX_IPV6SUPPORT: \"false\"\n
In DualStack mode there are additional vars:
CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\nFELIX_IPV6SUPPORT: \"true\"\nIP6: \"autodetect\"\nCALICO_IPV6POOL_CIDR: \"{{ spec.network.dualStack.IPv6podCIDR }}\"\n
"},{"location":"configuration/#specnetworkkuberouter","title":"spec.network.kuberouter
","text":"Element Description autoMTU
Autodetection of used MTU (default: true
). mtu
Override MTU setting (autoMTU must be set to false). metricsPort
Kube-router metrics server port. Set to 0 to disable metrics (default: 8080
). peerRouterIPs
Comma-separated list of global peer addresses. peerRouterASNs
Comma-separated list of global peer ASNs. hairpin
Hairpin mode, supported modes Enabled
: enabled cluster wide, Allowed
: must be allowed per service using annotations, Disabled
: disabled entirely (default: Enabled) hairpinMode
Deprecated Use hairpin
instead. If both hairpin
and hairpinMode
are defined, this is ignored. If only hairpinMode is configured, it explicitly activates hairpin mode (https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode). ipMasq
IP masquerade for traffic originating from the pod network, and destined outside of it (default: false) Note: Kube-router allows many networking aspects to be configured per node, service, and pod (for more information, refer to the Kube-router user guide).
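To illustrate how these options nest in the configuration, a sketch with example values (the MTU shown is only an example, not a recommendation) could look like this:
spec:\n  network:\n    provider: kuberouter\n    kuberouter:\n      autoMTU: false\n      # a manual mtu is only honored when autoMTU is false\n      mtu: 1350\n      hairpin: Enabled\n      ipMasq: false\n      metricsPort: 8080\n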
"},{"location":"configuration/#specnetworkkubeproxy","title":"spec.network.kubeProxy
","text":"Element Description disabled
Disable kube-proxy altogether (default: false
). mode
Kube proxy operating mode, supported modes iptables
, ipvs
, userspace
(default: iptables
) iptables
Kube proxy iptables settings ipvs
Kube proxy ipvs settings Default kube-proxy iptables settings:
iptables:\nmasqueradeAll: false\nmasqueradeBit: null\nminSyncPeriod: 0s\nsyncPeriod: 0s\n
Default kube-proxy ipvs settings:
ipvs:\nexcludeCIDRs: null\nminSyncPeriod: 0s\nscheduler: \"\"\nstrictARP: false\nsyncPeriod: 0s\ntcpFinTimeout: 0s\ntcpTimeout: 0s\nudpTimeout: 0s\n
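For reference, a sketch of how these settings nest under spec.network.kubeProxy; the ipvs override shown is purely illustrative:
spec:\n  network:\n    kubeProxy:\n      mode: ipvs\n      ipvs:\n        # example override of one of the defaults listed above\n        strictARP: true\n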
"},{"location":"configuration/#specnetworknodelocalloadbalancing","title":"spec.network.nodeLocalLoadBalancing
","text":"Configuration options related to k0s's node-local load balancing feature.
Note: This feature is experimental! Expect instabilities and/or breaking changes.
Element Descriptionenabled
Indicates if node-local load balancing should be used to access Kubernetes API servers from worker nodes. Default: false
. type
The type of the node-local load balancer to deploy on worker nodes. Default: EnvoyProxy
. (This is the only option for now.) envoyProxy
Configuration options related to the \"EnvoyProxy\" type of load balancing."},{"location":"configuration/#specnetworknodelocalloadbalancingenvoyproxy","title":"spec.network.nodeLocalLoadBalancing.envoyProxy
","text":"Configuration options required for using Envoy as the backing implementation for node-local load balancing.
Note: This type of load balancing is not supported on ARMv7 workers.
Element Descriptionimage
The OCI image that's being used for the Envoy Pod. imagePullPolicy
The pull policy used for the Envoy Pod. Defaults to spec.images.default_pull_policy
if omitted. apiServerBindPort
Port number on which to bind the Envoy load balancer for the Kubernetes API server on a worker's loopback interface. Default: 7443
. konnectivityServerBindPort
Port number on which to bind the Envoy load balancer for the konnectivity server on a worker's loopback interface. Default: 7132
."},{"location":"configuration/#speccontrollermanager","title":"spec.controllerManager
","text":"Element Description extraArgs
Map of key-values (strings) for any extra arguments you want to pass down to the Kubernetes controller manager process."},{"location":"configuration/#specscheduler","title":"spec.scheduler
","text":"Element Description extraArgs
Map of key-values (strings) for any extra arguments you want to pass down to Kubernetes scheduler process."},{"location":"configuration/#specworkerprofiles","title":"spec.workerProfiles
","text":"Worker profiles are used to manage worker-specific configuration in a centralized manner. A ConfigMap is generated for each worker profile. Based on the --profile
argument given to the k0s worker
, the configuration in the corresponding ConfigMap is picked up during startup.
The worker profiles are defined as an array. Each element has following properties:
Property Descriptionname
String; name to use as profile selector for the worker process values
Object; Kubelet configuration overrides, see below for details"},{"location":"configuration/#specworkerprofilesvalues-kubelet-configuration-overrides","title":"spec.workerProfiles[].values
(Kubelet configuration overrides)","text":"The Kubelet configuration overrides of a profile override the defaults defined by k0s.
Note that there are several fields that cannot be overridden:
clusterDNS
clusterDomain
apiVersion
kind
staticPodURL
spec.featureGates
","text":"Available components are:
If components
are omitted, propagates to all kube components.
Modifies extraArgs.
"},{"location":"configuration/#example","title":"Example","text":"spec:\nfeatureGates:\n- name: feature-gate-0\nenabled: true\ncomponents: [\"kube-apiserver\", \"kube-controller-manager\", \"kubelet\", \"kube-scheduler\"]\n- name: feature-gate-1\nenabled: true\n- name: feature-gate-2\nenabled: false\n
"},{"location":"configuration/#kubelet-feature-gates-example","title":"Kubelet feature gates example","text":"The below is an example of a k0s config with feature gates enabled:
spec:\nfeatureGates:\n- name: DevicePlugins\nenabled: true\ncomponents: [\"kubelet\"]\n- name: Accelerators\nenabled: true\ncomponents: [\"kubelet\"]\n- name: AllowExtTrafficLocalEndpoints\nenabled: false\n
"},{"location":"configuration/#configuration-examples","title":"Configuration examples","text":""},{"location":"configuration/#custom-volumeplugindir","title":"Custom volumePluginDir","text":"spec:\nworkerProfiles:\n- name: custom-pluginDir\nvalues:\nvolumePluginDir: /var/libexec/k0s/kubelet-plugins/volume/exec\n
"},{"location":"configuration/#eviction-policy","title":"Eviction Policy","text":"spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nevictionHard:\nmemory.available: \"500Mi\"\nnodefs.available: \"1Gi\"\nimagefs.available: \"100Gi\"\nevictionMinimumReclaim:\nmemory.available: \"0Mi\"\nnodefs.available: \"500Mi\"\nimagefs.available: \"2Gi\"\n
"},{"location":"configuration/#unsafe-sysctls","title":"Unsafe Sysctls","text":"spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nallowedUnsafeSysctls:\n- fs.inotify.max_user_instances\n
"},{"location":"configuration/#specimages","title":"spec.images
","text":"Nodes under the images
key all have the same basic structure:
spec:\nimages:\ncoredns:\nimage: quay.io/coredns/coredns\nversion: v1.7.0\n
If you want the list of default images and their versions to be included, use k0s config create --include-images
.
spec.images.konnectivity
spec.images.metricsserver
spec.images.kubeproxy
spec.images.coredns
spec.images.calico.cni
spec.images.calico.flexvolume
spec.images.calico.node
spec.images.calico.kubecontrollers
spec.images.kuberouter.cni
spec.images.kuberouter.cniInstaller
spec.images.repository
\u00b9\u00b9 If spec.images.repository
is set and not empty, every image will be pulled from images.repository
If spec.images.default_pull_policy
is set and not empty, it will be used as a pull policy for each bundled image.
images:\nrepository: \"my.own.repo\"\nkonnectivity:\nimage: calico/kube-controllers\nversion: v3.16.2\nmetricsserver:\nimage: registry.k8s.io/metrics-server/metrics-server\nversion: v0.6.4\n
At runtime the image names are calculated as my.own.repo/calico/kube-controllers:v3.16.2
and my.own.repo/metrics-server/metrics-server:v0.6.4
. This only affects the image pull location, and thus omitting an image specification here will not disable component deployment.
spec.extensions.helm
","text":"spec.extensions.helm
is the config file key in which you configure the list of Helm repositories and charts to deploy during cluster bootstrap (for more information, refer to Helm Charts).
spec.extensions.storage
","text":"spec.extensions.storage
controls the bundled storage provider. The default value external
means that no storage provider is deployed.
To enable embedded host-local storage provider use the following configuration:
spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
"},{"location":"configuration/#speckonnectivity","title":"spec.konnectivity
","text":"The spec.konnectivity
key is the config file key in which you configure Konnectivity-related settings.
agentPort
agent port to listen on (default 8132)adminPort
admin port to listen on (default 8133)spec.telemetry
","text":"To improve the end-user experience k0s is configured by defaul to collect telemetry data from clusters and send it to the k0s development team. To disable the telemetry function, change the enabled
setting to false
.
The telemetry interval is ten minutes.
spec:\ntelemetry:\nenabled: true\n
"},{"location":"configuration/#disabling-controller-components","title":"Disabling controller components","text":"k0s allows to completely disable some of the system components. This allows users to build a minimal Kubernetes control plane and use what ever components they need to fulfill their need for the control plane. Disabling the system components happens through a command line flag for the controller process:
--disable-components strings disable components (valid items: api-config,autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n
Note: As of k0s 1.26, the kubelet-config component has been replaced by the worker-config component. k0s will issue a warning when the old component name is being used. It is scheduled for removal in k0s 1.27. Please update to the new component name.
If you use k0sctl, just add the flag when installing the cluster for the first controller at spec.hosts.installFlags
in the config file, for example:
spec:\nhosts:\n- role: controller\ninstallFlags:\n- --disable-components=metrics-server\n
As seen from the component list, the only always-on component is the Kubernetes api-server, without which k0s serves no purpose.
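If you install k0s directly instead of via k0sctl, the same flag can be given to the install command; the component selection below is purely an example:
# example only: disable the bundled metrics-server and helm extensions controller\nsudo k0s install controller --disable-components metrics-server,helm\n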
"},{"location":"conformance-testing/","title":"Kubernetes conformance testing for k0s","text":"We run the conformance testing for the last RC build for a release. Follow the instructions as the conformance testing repository.
In a nutshell, you need to:
sonobuoy run --mode=certified-conformance
See runtime.
"},{"location":"custom-ca/","title":"Install using custom CA certificate","text":"k0s generates all needed certificates automatically in the <data-dir>/pki
directory (/var/lib/k0s/pki
, by default).
But sometimes there is a need to have the CA certificate in advance. To make it work, just put ca.key
and ca.crt
files into the <data-dir>/pki
:
mkdir -p /var/lib/k0s/pki\ncd /var/lib/k0s/pki\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days 365 -out ca.crt -subj \"/CN=Custom CA\"\n
Then you can install k0s as usual.
"},{"location":"custom-ca/#pre-generated-tokens","title":"Pre-generated tokens","text":"It's possible to get join in advance without having a running cluster.
k0s token pre-shared --role worker --cert /var/lib/k0s/pki/ca.crt --url https://<controller-ip>:6443/\n
The command above generates a join token and a Secret. The Secret must be deployed to the cluster to authorize the token. For example, you can put the Secret under the manifest directory and it will be deployed automatically.
"},{"location":"custom-cri-runtime/","title":"Custom cri runtime","text":"See runtime.
"},{"location":"dockershim/","title":"Dockershim deprecation - what does it mean for K0s?","text":"Back in December 2020, Kubernetes have announced the deprecation of the dockershim from version 1.24 onwards. As a consequence, k0s 1.24 and above don't support the dockershim as well.
"},{"location":"dockershim/#what-is-dockershim-and-why-was-it-deprecated","title":"What is dockershim and why was it deprecated?","text":"The dockershim is a transparent library that intercepts API calls to the kubernetes API and handles their operation in the Docker API. Early versions of Kubernetes used this shim in order to allow containers to run over docker. Later versions of Kubernetes started creating containers via the CRI (Container Runtime Interface). Since CRI has become the de-facto default runtime for Kubernetes, maintaining the dockershim turned into a heavy burden for Kubernetes maintainers, and so the decision to deprecate the built-in dockershim support came into being.
"},{"location":"dockershim/#so-whats-going-to-happen-to-dockershim","title":"So what's going to happen to dockershim?","text":"Dockershim is not gone. It's only changed ownership. Mirantis has agreed to maintain dockershim (now called cri-dockerd). See: The Future of Dockershim is cri-dockerd.
From Kubernetes version 1.24 you will have the built-in possibility to run containers via CRI, but if you want to continue using docker, you are free to do so, using cri-dockerd.
In order to continue to use the Docker engine with Kubernetes v1.24+, you will have to migrated all worker nodes to use cri-dockerd.
"},{"location":"dockershim/#migrating-to-cri-dockerd","title":"Migrating to CRI-Dockerd","text":"This migration guide assumes that you've been running k0s with docker on version 1.23 and below.
The following steps will need to be done on ALL k0s' worker nodes, or single-node controllers. Basically any node that runs containers will need to be migrated using the process detailed below.
Please note that there are currently some pitfalls around container metrics when using CRI-dockerd.
"},{"location":"dockershim/#cordon-and-drain-the-node","title":"Cordon and drain the node","text":"Get a list of all nodes (k0s is still version 1.23, which already includes the docker-shim):
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 52m v1.27.5+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready <none> 12s v1.27.5+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
cordon and drain the nodes (migrate one by one):
sudo k0s kubectl cordon ip-10-0-62-250.eu-west-1.compute.internal \nsudo k0s kubectl drain ip-10-0-62-250.eu-west-1.compute.internal --ignore-daemonsets\n
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 56m v1.27.5+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready,SchedulingDisabled <none> 3m40s v1.27.5+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
Stop k0s on the node:
sudo k0s stop\n
"},{"location":"dockershim/#installing-cri-dockerd","title":"Installing CRI-Dockerd","text":"Download the Latest cri-dockerd deb package:
cd /tmp\n\n# Get the deb file name for ubuntu-jammy\nOS=\"ubuntu-jammy\"\nPKG=$(curl -s https://api.github.com/repos/Mirantis/cri-dockerd/releases/latest | grep ${OS} | grep http | cut -d '\"' -f 4)\n\nwget ${PKG} -O cri-dockerd-latest.deb\n\nsudo dpkg -i cri-dockerd-latest.deb\n\nSelecting previously unselected package cri-dockerd.\n(Reading database ... 164618 files and directories currently installed.)\nPreparing to unpack cri-dockerd-latest.deb ...\nUnpacking cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nSetting up cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nCreated symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service \u2192 /lib/systemd/system/cri-docker.service.\nCreated symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket \u2192 /lib/systemd/system/cri-docker.socket.\n
Verify the correct version:
which cri-dockerd\n/usr/bin/cri-dockerd\n\ncri-dockerd --version\ncri-dockerd 0.2.1 (HEAD)\n
Make sure dockershim is started:
sudo systemctl status cri-docker.service\n\u25cf cri-docker.service - CRI Interface for Docker Application Container Engine\n Loaded: loaded (/lib/systemd/system/cri-docker.service; enabled; vendor preset: enabled)\nActive: active (running) since Wed 2022-05-25 14:27:31 UTC; 1min 23s ago\nTriggeredBy: \u25cf cri-docker.socket\n Docs: https://docs.mirantis.com\n Main PID: 1404151 (cri-dockerd)\nTasks: 9\nMemory: 15.3M\n CGroup: /system.slice/cri-docker.service\n \u2514\u25001404151 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=\n
"},{"location":"dockershim/#configure-k0s-to-use-dockershim","title":"Configure K0s to use dockershim","text":"Replace docker socket in the systemd file for cri-dockerd (the step below should be run AFTER upgrading k0s to version 1.24):
sudo sed -i -e 's_--cri-socket=docker:unix:///var/run/docker.sock_--cri-socket docker:unix:///var/run/cri-dockerd.sock_' /etc/systemd/system/k0sworker.service\nsudo systemctl daemon-reload\n
"},{"location":"dockershim/#start-k0s-with-cri-dockerd","title":"Start k0s with cri-dockerd","text":"sudo k0s start\n
Verify the running pods via docker ps
:
docker ps --format \"table {{.ID}}\\t{{.Names}}\\t{{.State}}\\t{{.Status}}\\t{{.Image}}\"\n\nCONTAINER ID NAMES STATE STATUS IMAGE\n1b9b4624ddfd k8s_konnectivity-agent_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_1 running Up 51 minutes quay.io/k0sproject/apiserver-network-proxy-agent\n414758a8a951 k8s_kube-router_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_1 running Up 51 minutes 3a67679337a5\nb81960bb304c k8s_kube-proxy_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_1 running Up 51 minutes quay.io/k0sproject/kube-proxy\nfb888cbc5ae0 k8s_POD_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_0 running Up 51 minutes registry.k8s.io/pause:3.1\n382d0a938c9d k8s_POD_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_0 running Up 51 minutes registry.k8s.io/pause:3.1\n72d4a47b5609 k8s_POD_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_0 running Up 51 minutes registry.k8s.io/pause:3.1\n
On the controller, you'll be able to see the worker started with the new docker container runtime:
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 117m v1.27.5+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready,SchedulingDisabled <none> 64m v1.27.5+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
"},{"location":"dockershim/#uncordon-the-node","title":"Uncordon the Node","text":"sudo k0s kubectl uncordon ip-10-0-62-250.eu-west-1.compute.internal\n\nnode/ip-10-0-62-250.eu-west-1.compute.internal uncordoned\n
You should now see the node Ready for scheduling with the docker Runtime:
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 119m v1.27.5+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready <none> 66m v1.27.5+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
"},{"location":"dual-stack/","title":"Dual-stack Networking","text":"Note: Dual stack networking setup requires that you configure Calico or a custom CNI as the CNI provider.
Use the following k0s.yaml
as a template to enable dual-stack networking. This configuration will set up bundled calico CNI, enable feature gates for the Kubernetes components, and set up kubernetes-controller-manager
.
spec:\nnetwork:\npodCIDR: \"10.244.0.0/16\"\nserviceCIDR: \"10.96.0.0/12\"\nprovider: calico\ncalico:\nmode: \"bird\"\ndualStack:\nenabled: true\nIPv6podCIDR: \"fd00::/108\"\nIPv6serviceCIDR: \"fd01::/108\"\n
"},{"location":"dual-stack/#cni-settings-calico","title":"CNI Settings: Calico","text":"For cross-pod connectivity, use BIRD for the backend. Calico does not support tunneling for the IPv6, and thus VXLAN and IPIP backends do not work.
Note: In any Calico mode other than cross-pod, the pods can only reach pods on the same node.
"},{"location":"dual-stack/#cni-settings-external-cni","title":"CNI Settings: External CNI","text":"Although the k0s.yaml
dualStack section enables all of the neccessary feature gates for the Kubernetes components, for use with an external CNI it must be set up to support IPv6.
k0s comes with the option to enable dynamic configuration for cluster level components. This covers all the components other than etcd (or sqlite) and the Kubernetes api-server. This option enables k0s configuration directly via Kubernetes API as opposed to using a configuration file for all cluster configuration.
This feature has to be enabled for every controller in the cluster using the --enable-dynamic-config
flag in k0s controller
or k0s install controller
commands. Having both types of controllers in the same cluster will cause a conflict.
The existing and enabled-by-default method is what we call static configuration. That's the way where the k0s process reads the config from the given YAML file (or uses the default config if no config is given by user) and configures every component accordingly. This means that for any configuration change the cluster admin has to restart all controllers on the cluster and have matching configs on each controller node.
In dynamic configuration mode the first controller to boot up when the cluster is created will use the given config YAML as a bootstrap configuration and stores it in the Kubernetes API. All the other controllers will find the config existing on the API and will use it as the source-of-truth for configuring all the components except for etcd and kube-apiserver. After the initial cluster bootstrap the source of truth for all controllers is the configuration object in the Kubernetes API.
"},{"location":"dynamic-configuration/#cluster-configuration-vs-controller-node-configuration","title":"Cluster configuration vs. controller node configuration","text":"In the k0s configuration options there are some options that are cluster-wide and some that are specific to each controller node in the cluster. The following list outlines which options are controller node specific and have to be configured only via the local file:
spec.api
- these options configure how the local Kubernetes API server is setupspec.storage
- these options configure how the local storage (etcd or sqlite) is setupIn case of HA control plane, all the controllers will need this part of the configuration as otherwise they will not be able to get the storage and Kubernetes API server running.
"},{"location":"dynamic-configuration/#configuration-location","title":"Configuration location","text":"The cluster wide configuration is stored in the Kubernetes API as a custom resource called clusterconfig
. There's currently only one instance named k0s
. You can edit the configuration with what ever means possible, for example with:
k0s config edit\n
This will open the configuration object for editing in your system's default editor.
"},{"location":"dynamic-configuration/#configuration-reconciliation","title":"Configuration reconciliation","text":"The dynamic configuration uses the typical operator pattern for operation. k0s controller will detect when the object changes and will reconcile the configuration changes to be reflected to how different components are configured. So say you want to change the MTU setting for kube-router CNI networking you'd change the config to contain e.g.:
kuberouter:\nmtu: 1350\nautoMTU: false\n
This will change the kube-router related configmap and thus make kube-router to use different MTU settings for new pods.
"},{"location":"dynamic-configuration/#configuration-options","title":"Configuration options","text":"The configuration object is a 1-to-1 mapping with the existing configuration YAML. All the configuration options EXCEPT options under spec.api
and spec.storage
are dynamically reconciled.
As with any Kubernetes cluster there are certain things that just cannot be changed on-the-fly, this is the list of non-changeable options:
network.podCIDR
network.serviceCIDR
network.provider
The dynamic configuration reconciler operator will write status events for all the changes it detects. To see all dynamic config related events, use:
k0s config status\n
LAST SEEN TYPE REASON OBJECT MESSAGE\n64s Warning FailedReconciling clusterconfig/k0s failed to validate config: [invalid pod CIDR invalid ip address]\n59s Normal SuccessfulReconcile clusterconfig/k0s Succesfully reconciler cluster config\n69s Warning FailedReconciling clusterconfig/k0s cannot change CNI provider from kuberouter to calico\n
"},{"location":"environment-variables/","title":"Environment variables","text":"k0s install
does not support environment variables.
Setting environment variables for components used by k0s depends on the used init system. The environment variables set in k0scontroller
or k0sworker
service will be inherited by k0s components, such as etcd
, containerd
, konnectivity
, etc.
Component specific environment variables can be set in k0scontroller
or k0sworker
service. For example: for CONTAINERD_HTTPS_PROXY
, the prefix CONTAINERD_
will be stripped and converted to HTTPS_PROXY
in the containerd
process.
For those components having env prefix convention such as ETCD_xxx
, they are handled specially, i.e. the prefix will not be stripped. For example, ETCD_MAX_WALS
will still be ETCD_MAX_WALS
in etcd process.
The proxy envs HTTP_PROXY
, HTTPS_PROXY
, NO_PROXY
are always overridden by component specific environment variables, so ETCD_HTTPS_PROXY
will still be converted to HTTPS_PROXY
in etcd process.
Create a drop-in directory and add config file with a desired environment variable:
mkdir -p /etc/systemd/system/k0scontroller.service.d\ntee -a /etc/systemd/system/k0scontroller.service.d/http-proxy.conf <<EOT\n[Service]\nEnvironment=HTTP_PROXY=192.168.33.10:3128\nEOT\n
"},{"location":"environment-variables/#openrc","title":"OpenRC","text":"Export desired environment variable overriding service configuration in /etc/conf.d directory:
echo 'export HTTP_PROXY=\"192.168.33.10:3128\"' > /etc/conf.d/k0scontroller\n
"},{"location":"experimental-windows/","title":"Run k0s worker nodes in Windows","text":"IMPORTANT: Windows support for k0s is under active development and must be considered experimental.
"},{"location":"experimental-windows/#prerequisites","title":"Prerequisites","text":"The cluster must be running at least one worker node and control plane on Linux. You can use Windows to run additional worker nodes.
"},{"location":"experimental-windows/#run-k0s","title":"Run k0s","text":"Note: The k0s.exe supervises kubelet.exe and kube-proxy.exe.
During the first run, the calico install script is created as C:\\bootstrap.ps1
. This bootstrap script downloads the calico binaries, builds pause container and sets up vSwitch settings.
Install Mirantis Container Runtime on the Windows node(s), as it is required for the initial Calico set up).
k0s worker --cri-socket=docker:tcp://127.0.0.1:2375 --cidr-range=<cidr_range> --cluster-dns=<clusterdns> --api-server=<k0s api> <token>\n
You must initiate the Cluster control with the correct config.
"},{"location":"experimental-windows/#configuration","title":"Configuration","text":""},{"location":"experimental-windows/#strict-affinity","title":"Strict-affinity","text":"You must enable strict affinity to run the windows node.
If the spec.network.calico.withWindowsNodes
field is set to true
(it is set to false
by default) the additional calico related manifest /var/lib/k0s/manifests/calico/calico-IPAMConfig-ipamconfig.yaml
is created with the following values:
---\napiVersion: crd.projectcalico.org/v1\nkind: IPAMConfig\nmetadata:\nname: default\nspec:\nstrictAffinity: true\n
Alternately, you can manually execute calicoctl:
calicoctl ipam configure --strictaffinity=true\n
"},{"location":"experimental-windows/#network-connectivity-in-aws","title":"Network connectivity in AWS","text":"Disable the Change Source/Dest. Check
option for the network interface attached to your EC2 instance. In AWS, the console option for the network interface is in the Actions menu.
k0s offers the following CLI arguments in lieu of a formal means for passing cluster settings from controller plane to worker:
kubectl run win --image=hello-world:nanoserver --command=true -i --attach=true -- cmd.exe\n
"},{"location":"experimental-windows/#manifest-for-pod-with-iis-web-server","title":"Manifest for pod with IIS web-server","text":"apiVersion: v1\nkind: Pod\nmetadata:\nname: iis\nspec:\ncontainers:\n- name: iis\nimage: mcr.microsoft.com/windows/servercore/iis\nimagePullPolicy: IfNotPresent\n
"},{"location":"extensions/","title":"Cluster extensions","text":"k0s allows users to use extensions to extend cluster functionality.
At the moment the only supported type of extensions is helm based charts.
The default configuration has no extensions.
"},{"location":"extensions/#helm-based-extensions","title":"Helm based extensions","text":""},{"location":"extensions/#configuration-example","title":"Configuration example","text":"helm:\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"11.16.8\"\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n
By using the configuration above, the cluster would:
prometheus-community/prometheus
chart of the specified version to the default
namespace.The chart installation is implemented by using CRD helm.k0sproject.io/Chart
. For every given helm extension the cluster creates a Chart CRD instance. The cluster has a controller which monitors for the Chart CRDs, supporting the following operations:
For security reasons, the cluster operates only on Chart CRDs instantiated in the kube-system
namespace, however, the target namespace could be any.
apiVersion: helm.k0sproject.io/v1beta1\nkind: Chart\nmetadata:\ncreationTimestamp: \"2020-11-10T14:17:53Z\"\ngeneration: 2\nlabels:\nk0s.k0sproject.io/stack: helm\nname: k0s-addon-chart-test-addon\nnamespace: kube-system\nresourceVersion: \"627\"\nselfLink: /apis/helm.k0sproject.io/v1beta1/namespaces/kube-system/charts/k0s-addon-chart-test-addon\nuid: ebe59ed4-1ff8-4d41-8e33-005b183651ed\nspec:\nchartName: prometheus-community/prometheus\nnamespace: default\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nversion: 11.16.8\nstatus:\nappVersion: 2.21.0\nnamespace: default\nreleaseName: prometheus-1605017878\nrevision: 2\nupdated: 2020-11-10 14:18:08.235656 +0000 UTC m=+41.871656901\nversion: 11.16.8\n
The Chart.spec
defines the chart information.
The Chart.status
keeps the information about the last operation performed by the operator.
k0s is packaged as a single binary, which includes all the needed components. All the binaries are statically linked which means that in typical use cases there's an absolute minimum of external runtime dependencies.
However, depending on the node role and cluster configuration, some of the underlying components may have specific dependencies, like OS level tools, packages and libraries. This page aims to provide a comprehensive overview.
The following command checks for known requirements on a host (currently only available on Linux):
k0s sysinfo\n
"},{"location":"external-runtime-deps/#a-unique-machine-id-for-multi-node-setups","title":"A unique machine ID for multi-node setups","text":"Whenever k0s is run in a multi-node setup (i.e. the --single
command line flag isn't used), k0s requires a machine ID: a unique host identifier that is somewhat stable across reboots. For Linux, this ID is read from the files /var/lib/dbus/machine-id
or /etc/machine-id
. For Windows, it's taken from the registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Cryptography\\MachineGuid
. If neither of the OS specific sources yield a result, k0s will fallback to use a machine ID based on the hostname.
When running k0s on top of virtualized or containerized environments, you need to ensure that hosts get their own unique IDs, even if they have been created from the same image.
"},{"location":"external-runtime-deps/#linux-specific","title":"Linux specific","text":""},{"location":"external-runtime-deps/#linux-kernel-configuration","title":"Linux kernel configuration","text":"Needless to say, as k0s operates Kubernetes worker nodes, there's a certain number of needed Linux kernel modules and configurations that we need in the system. This basically stems from the need to run both containers and also be able to set up networking for the containers.
The needed kernel configuration items are listed below. All of them are available in Kernel versions 4.3 and above. If running on older kernels, check if the distro in use has backported some features; nevertheless, it might meet the requirements. k0s will check the Linux kernel release as part of its pre-flight checks and issue a warning if it's below 3.10.
The list covers ONLY the k0s/kubernetes components\u2019 needs on worker nodes. Your own workloads may require more.
CONFIG_CGROUPS
: Control Group supportCONFIG_CGROUP_FREEZER
: Freezer cgroup subsystemCONFIG_CGROUP_PIDS
: PIDs cgroup subsystem kubernetes/kubeadm#2335 (comment)CONFIG_CGROUP_DEVICE
: Device controller for cgroupsCONFIG_CPUSETS
: Cpuset supportCONFIG_CGROUP_CPUACCT
: Simple CPU accounting cgroup subsystemCONFIG_MEMCG
: Memory Resource Controller for Control GroupsCONFIG_CGROUP_HUGETLB
: HugeTLB Resource Controller for Control Groups kubernetes/kubeadm#2335 (comment)CONFIG_CGROUP_SCHED
: Group CPU schedulerCONFIG_FAIR_GROUP_SCHED
: Group scheduling for SCHED_OTHER kubernetes/kubeadm#2335 (comment)CONFIG_CFS_BANDWIDTH
: CPU bandwidth provisioning for FAIR_GROUP_SCHED Required if CPU CFS quota enforcement is enabled for containers that specify CPU limits (--cpu-cfs-quota
).CONFIG_BLK_CGROUP
: Block IO controller kubernetes/kubernetes#92287 (comment)CONFIG_NAMESPACES
: Namespaces supportCONFIG_UTS_NS
: UTS namespaceCONFIG_IPC_NS
: IPC namespaceCONFIG_PID_NS
: PID namespaceCONFIG_NET_NS
: Network namespaceCONFIG_NET
: Networking supportCONFIG_INET
: TCP/IP networkingCONFIG_NETFILTER
: Network packet filtering framework (Netfilter)CONFIG_NETFILTER_ADVANCED
: Advanced netfilter configurationCONFIG_NETFILTER_XTABLES
: Netfilter Xtables supportCONFIG_NETFILTER_XT_TARGET_REDIRECT
: REDIRECT target supportCONFIG_NETFILTER_XT_MATCH_COMMENT
: \"comment\" match supportCONFIG_EXT4_FS
: The Extended 4 (ext4) filesystemCONFIG_PROC_FS
: /proc file system supportNote: As part of its pre-flight checks, k0s will try to inspect and validate the kernel configuration. In order for that to succeed, the configuration needs to be accessible at runtime. There are some typical places that k0s will check. A bullet-proof way to ensure the accessibility is to enable CONFIG_IKCONFIG_PROC
, and, if enabled as a module, to load the configs
module: modprobe configs
.
Both cgroup v1 and cgroup v2 are supported.
Required cgroup controllers:
Optional cgroup controllers:
There are a few external tools that may be needed or used under specific circumstances:
"},{"location":"external-runtime-deps/#containerd-and-apparmor","title":"containerd and AppArmor","text":"In order to use containerd in conjunction with AppArmor, it must be enabled in the kernel and the /sbin/apparmor_parser
executable must be installed on the host, otherwise containerd will disable AppArmor support.
iptables may be executed to detect if there are any existing iptables rules and if those are in legacy of nft mode. If iptables is not found, k0s will assume that there are no pre-existing iptables rules.
"},{"location":"external-runtime-deps/#useradd-adduser","title":"useradd / adduser","text":"During k0s install
the external tool useradd
will be used on the controllers to create system user accounts for k0s. If this does exist it will fall-back to busybox's adduser
.
k0s reset
will execute either userdel
or deluser
to clean up system user accounts.
On k0s worker will modprobe
be executed to load missing kernel modules if they are not detected.
External /usr/bin/id
will be executed as a fallback if local user lookup fails, in case NSS is used.
iptables
Required for worker nodes. Resolved by @ncopa in #1046 by adding iptables
and friends to k0s's embedded binaries.find
, du
and nice
Required for worker nodes. Resolved upstream by @ncopa in kubernetes/kubernetes#96115, contained in Kubernetes 1.21.8 (5b13c8f68d4) and 1.22.0 (d45ba645a8f).TBD.
"},{"location":"helm-charts/","title":"Helm Charts","text":"Defining your extensions as Helm charts is one of two methods you can use to run k0s with your preferred extensions (the other being through the use of Manifest Deployer).
k0s supports two methods for deploying applications using Helm charts:
k0s.yaml
. This method does not require a separate install of helm
tool and the charts automatically deploy at the k0s bootstrap phase.Adding Helm charts into the k0s configuration file gives you a declarative way in which to configure the cluster. k0s controller manages the setup of Helm charts that are defined as extensions in the k0s configuration file.
"},{"location":"helm-charts/#wait-for-install","title":"Wait for install","text":"Each chart is proccesed the same way CLI tool does with following options:
--wait
--wait-for-jobs
--timeout 10m
It is possible to customize timeout by using .Timeout
field.
In the example, Prometheus is configured from \"stable\" Helms chart repository. Add the following to k0s.yaml
and restart k0s, after which Prometheus should start automatically with k0s.
spec:\nextensions:\nhelm:\nconcurrencyLevel: 5\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"14.6.1\"\ntimeout: 20m\norder: 1\nvalues: |\nalertmanager:\npersistentVolume:\nenabled: false\nserver:\npersistentVolume:\nenabled: false\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\norder: 2\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist on all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\norder: 2 values: \"\"\nnamespace: default\n
Example extensions that you can use with Helm charts include:
Running k0s controller with --debug=true
enables helm debug logging.
You can create high availability for the control plane by distributing the control plane across multiple nodes and installing a load balancer on top. Etcd can be colocated with the controller nodes (default in k0s) to achieve highly available datastore at the same time.
Note: In this context even 2 node controlplane is considered HA even though it's not really HA from etcd point of view. The same requirement for LB still applies.
"},{"location":"high-availability/#network-considerations","title":"Network considerations","text":"You should plan to allocate the control plane nodes into different zones. This will avoid failures in case one zone fails.
For etcd high availability it's recommended to configure 3 or 5 controller nodes. For more information, refer to the etcd documentation.
"},{"location":"high-availability/#load-balancer","title":"Load Balancer","text":"Control plane high availability requires a tcp load balancer, which acts as a single point of contact to access the controllers. The load balancer needs to allow and route traffic to each controller through the following ports:
The load balancer can be implemented in many different ways and k0s doesn't have any additional requirements. You can use for example HAProxy, NGINX or your cloud provider's load balancer.
"},{"location":"high-availability/#example-configuration-haproxy","title":"Example configuration: HAProxy","text":"Add the following lines to the end of the haproxy.cfg:
frontend kubeAPI\n bind :6443\n mode tcp\n default_backend kubeAPI_backend\nfrontend konnectivity\n bind :8132\n mode tcp\n default_backend konnectivity_backend\nfrontend controllerJoinAPI\n bind :9443\n mode tcp\n default_backend controllerJoinAPI_backend\n\nbackend kubeAPI_backend\n mode tcp\n server k0s-controller1 <ip-address1>:6443 check check-ssl verify none\n server k0s-controller2 <ip-address2>:6443 check check-ssl verify none\n server k0s-controller3 <ip-address3>:6443 check check-ssl verify none\nbackend konnectivity_backend\n mode tcp\n server k0s-controller1 <ip-address1>:8132 check check-ssl verify none\n server k0s-controller2 <ip-address2>:8132 check check-ssl verify none\n server k0s-controller3 <ip-address3>:8132 check check-ssl verify none\nbackend controllerJoinAPI_backend\n mode tcp\n server k0s-controller1 <ip-address1>:9443 check check-ssl verify none\n server k0s-controller2 <ip-address2>:9443 check check-ssl verify none\n server k0s-controller3 <ip-address3>:9443 check check-ssl verify none\n\nlisten stats\n bind *:9000\n mode http\n stats enable\n stats uri /\n
The last block \"listen stats\" is optional, but can be helpful. It enables HAProxy statistics with a separate dashboard to monitor for example the health of each backend server. You can access it using a web browser:
http://<ip-addr>:9000\n
Restart HAProxy to apply the configuration changes.
"},{"location":"high-availability/#k0s-configuration","title":"k0s configuration","text":"The load balancer address must be configured to k0s either by using k0s.yaml
or by using k0sctl to automatically deploy all controllers with the same configuration:
Note to update your load balancer's public ip address into two places.
spec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
"},{"location":"high-availability/#configuration-using-k0sctlyaml-for-k0sctl","title":"Configuration using k0sctl.yaml (for k0sctl)","text":"Add the following lines to the end of the k0sctl.yaml. Note to update your load balancer's public ip address into two places.
k0s:\nconfig:\nspec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
For greater detail about k0s configuration, refer to the Full configuration file reference.
"},{"location":"install/","title":"Quick Start Guide","text":"On completion of the Quick Start you will have a full Kubernetes cluster with a single node that includes both the controller and the worker. Such a setup is ideal for environments that do not require high-availability and multiple nodes.
"},{"location":"install/#prerequisites","title":"Prerequisites","text":"Note: Before proceeding, make sure to review the System Requirements.
Though the Quick Start material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.
"},{"location":"install/#install-k0s","title":"Install k0s","text":"Download k0s
Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.
curl -sSLf https://get.k0s.sh | sudo sh\n
Install k0s as a service
The k0s install
sub-command installs k0s as a system service on the local host that is running one of the supported init systems: Systemd or OpenRC. You can execute the install for workers, controllers or single node (controller+worker) instances.
Run the following command to install a single node k0s that includes the controller and worker functions with the default configuration:
sudo k0s install controller --single\n
The k0s install controller
sub-command accepts the same flags and parameters as the k0s controller
. Refer to manual install for a custom config file example.
It is possible to set environment variables with the install command:
sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\n
The system service can be reinstalled with the --force
flag:
sudo k0s install controller --single --force\nsudo systemctl daemon-reload\n
Start k0s as a service
To start the k0s service, run:
sudo k0s start\n
The k0s service will start automatically after the node restart.
A minute or two typically passes before the node is ready to deploy applications.
Check service, logs and k0s status
To get general information about your k0s instance's status, run:
$ sudo k0s status\nVersion: v1.27.5+k0s.0\nProcess ID: 436\nRole: controller\nWorkloads: true\nInit System: linux-systemd\n
Access your cluster using kubectl
Note: k0s includes the Kubernetes command-line tool kubectl.
Use kubectl to deploy your application or to check your node status:
$ sudo k0s kubectl get nodes\nNAME STATUS ROLES AGE VERSION\nk0s Ready <none> 4m6s v1.27.5+k0s\n
The removal of k0s is a two-step process.
Stop the service.
sudo k0s stop\n
Execute the k0s reset
command.
The k0s reset
command cleans up the installed system service, data directories, containers, mounts and network namespaces.
sudo k0s reset\n
Reboot the system.
A few small k0s fragments persist even after the reset (for example, iptables). As such, you should initiate a reboot after the running of the k0s reset
command.
You can create a k0s cluster on top of docker. In such a scenario, by default, both controller and worker nodes are run in the same container to provide an easy local testing \"cluster\".
"},{"location":"k0s-in-docker/#prerequisites","title":"Prerequisites","text":"You will require a Docker environment running on a Mac, Windows, or Linux system.
"},{"location":"k0s-in-docker/#container-images","title":"Container images","text":"The k0s containers are published both on Docker Hub and GitHub. For reasons of simplicity, the examples given here use Docker Hub (GitHub requires a separate authentication that is not covered). Alternative links include:
Note: Due to Docker Hub tag validation scheme, we have to use -
as the k0s version separator instead of the usual +
. So for example k0s version v1.27.5+k0s.0
is tagged as docker.io/k0sproject/k0s:v1.27.5-k0s.0
.
You can run your own k0s in Docker:
docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
Note: If you are using Docker Desktop as the runtime, starting from 4.3.0 version it's using cgroups v2 in the VM that runs the engine. This means you have to add some extra flags to the above command to get kubelet and containerd to properly work with cgroups v2:
--cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw\n
"},{"location":"k0s-in-docker/#2-optional-create-additional-workers","title":"2. (Optional) Create additional workers","text":"You can attach multiple workers nodes into the cluster to then distribute your application containers to separate workers.
For each required worker:
Acquire a join token for the worker:
token=$(docker exec -t -i k0s k0s token create --role=worker)\n
Run the container to create and join the new worker:
docker run -d --name k0s-worker1 --hostname k0s-worker1 --privileged -v /var/lib/k0s docker.io/k0sproject/k0s:latest k0s worker $token\n
Access your cluster using kubectl:
docker exec k0s kubectl get nodes\n
Alternatively, grab the kubeconfig file with docker exec k0s cat /var/lib/k0s/pki/admin.conf
and paste it into Lens.
As an alternative you can run k0s using Docker Compose:
version: \"3.9\"\nservices:\nk0s:\ncontainer_name: k0s\nimage: docker.io/k0sproject/k0s:latest\ncommand: k0s controller --config=/etc/k0s/config.yaml --enable-worker\nhostname: k0s\nprivileged: true\nvolumes:\n- \"/var/lib/k0s\"\ntmpfs:\n- /run\n- /var/run\nports:\n- \"6443:6443\"\nnetwork_mode: \"bridge\"\nenvironment:\nK0S_CONFIG: |-\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\n# Any additional configuration goes here ...\n
"},{"location":"k0s-in-docker/#known-limitations","title":"Known limitations","text":""},{"location":"k0s-in-docker/#no-custom-docker-networks","title":"No custom Docker networks","text":"Currently, k0s nodes cannot be run if the containers are configured to use custom networks (for example, with --net my-net
). This is because Docker sets up a custom DNS service within the network which creates issues with CoreDNS. No completely reliable workaounds are available, however no issues should arise from running k0s cluster(s) on a bridge network.
You can manually set up k0s nodes by creating a multi-node cluster that is locally managed on each node. This involves several steps, to first install each node separately, and to then connect the node together using access tokens.
"},{"location":"k0s-multi-node/#prerequisites","title":"Prerequisites","text":"Note: Before proceeding, make sure to review the System Requirements.
Though the Manual Install material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.
You can speed up the use of the k0s
command by enabling shell completion.
Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.
curl -sSLf https://get.k0s.sh | sudo sh\n
The download script accepts the following environment variables:
Variable PurposeK0S_VERSION=v1.27.5+k0s.0
Select the version of k0s to be installed DEBUG=true
Output commands and their arguments at execution. Note: If you require environment variables and use sudo, you can do:
curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.27.5+k0s.0 sh\n
"},{"location":"k0s-multi-node/#2-bootstrap-a-controller-node","title":"2. Bootstrap a controller node","text":"Create a configuration file:
mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
Note: For information on settings modification, refer to the configuration documentation.
sudo k0s install controller -c /etc/k0s/k0s.yaml\n
sudo k0s start\n
k0s process acts as a \"supervisor\" for all of the control plane components. In moments the control plane will be up and running.
"},{"location":"k0s-multi-node/#3-create-a-join-token","title":"3. Create a join token","text":"You need a token to join workers to the cluster. The token embeds information that enables mutual trust between the worker and controller(s) and which allows the node to join the cluster as worker.
To get a token, run the following command on one of the existing controller nodes:
sudo k0s token create --role=worker\n
The resulting output is a long token string, which you can use to add a worker to the cluster.
For enhanced security, run the following command to set an expiration time for the token:
sudo k0s token create --role=worker --expiry=100h > token-file\n
"},{"location":"k0s-multi-node/#4-add-workers-to-the-cluster","title":"4. Add workers to the cluster","text":"To join the worker, run k0s in the worker mode with the join token you created:
sudo k0s install worker --token-file /path/to/token/file\n
sudo k0s start\n
"},{"location":"k0s-multi-node/#about-tokens","title":"About tokens","text":"The join tokens are base64-encoded kubeconfigs for several reasons:
The bearer token embedded in the kubeconfig is a bootstrap token. For controller join tokens and worker join tokens k0s uses different usage attributes to ensure that k0s can validate the token role on the controller side.
"},{"location":"k0s-multi-node/#5-add-controllers-to-the-cluster","title":"5. Add controllers to the cluster","text":"Note: Either etcd or an external data store (MySQL or Postgres) via kine must be in use to add new controller nodes to the cluster. Pay strict attention to the high availability configuration and make sure the configuration is identical for all controller nodes.
To create a join token for the new controller, run the following command on an existing controller:
sudo k0s token create --role=controller --expiry=1h > token-file\n
On the new controller, run:
sudo k0s install controller --token-file /path/to/token/file -c /etc/k0s/k0s.yaml\n
Important notice here is that each controller in the cluster must have k0s.yaml otherwise some cluster nodes will use default config values which will lead to inconsistency behavior. If your configuration file includes IP addresses (node address, sans, etcd peerAddress), remember to update them accordingly for this specific controller node.
sudo k0s start\n
"},{"location":"k0s-multi-node/#6-check-k0s-status","title":"6. Check k0s status","text":"To get general information about your k0s instance's status:
sudo k0s status\n
Version: v1.27.5+k0s.0\nProcess ID: 2769\nParent Process ID: 1\nRole: controller\nInit System: linux-systemd\nService file: /etc/systemd/system/k0scontroller.service\n
"},{"location":"k0s-multi-node/#7-access-your-cluster","title":"7. Access your cluster","text":"Use the Kubernetes 'kubectl' command-line tool that comes with k0s binary to deploy your application or check your node status:
sudo k0s kubectl get nodes\n
NAME STATUS ROLES AGE VERSION\nk0s Ready <none> 4m6s v1.27.5+k0s\n
You can also access your cluster easily with Lens, simply by copying the kubeconfig and pasting it to Lens:
sudo cat /var/lib/k0s/pki/admin.conf\n
Note: To access the cluster from an external network you must replace localhost
in the kubeconfig with the host ip address for your controller.
See the Quick Start Guide.
"},{"location":"k0sctl-install/","title":"Install using k0sctl","text":"k0sctl is a command-line tool for bootstrapping and managing k0s clusters. k0sctl connects to the provided hosts using SSH and gathers information on the hosts, with which it forms a cluster by configuring the hosts, deploying k0s, and then connecting the k0s nodes together.
With k0sctl, you can create multi-node clusters in a manner that is automatic and easily repeatable. This method is recommended for production cluster installation.
Note: The k0sctl install method is necessary for automatic upgrade.
"},{"location":"k0sctl-install/#prerequisites","title":"Prerequisites","text":"You can execute k0sctl on any system that supports the Go language. Pre-compiled k0sctl binaries are available on the k0sctl releases page).
Note: For target host prerequisites information, refer to the k0s System Requirements.
"},{"location":"k0sctl-install/#install-k0s","title":"Install k0s","text":""},{"location":"k0sctl-install/#1-install-k0sctl-tool","title":"1. Install k0sctl tool","text":"k0sctl is a single binary, the instructions for downloading and installing of which are available in the k0sctl github repository.
"},{"location":"k0sctl-install/#2-configure-the-cluster","title":"2. Configure the cluster","text":"Run the following command to create a k0sctl configuration file:
k0sctl init > k0sctl.yaml\n
This action creates a k0sctl.yaml
file in the current directory:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nhosts:\n- role: controller\nssh:\naddress: 10.0.0.1 # replace with the controller's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n- role: worker\nssh:\naddress: 10.0.0.2 # replace with the worker's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n
Provide each host with a valid IP address that is reachable by k0sctl, and the connection details for an SSH connection.
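Before applying, it can help to confirm that every host is reachable over SSH with the listed key (a sketch reusing the addresses from the example above):
ssh -i ~/.ssh/id_rsa root@10.0.0.1 'echo controller reachable'\nssh -i ~/.ssh/id_rsa root@10.0.0.2 'echo worker reachable'\n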
Note: Refer to the k0sctl documentation for k0sctl configuration specifications.
"},{"location":"k0sctl-install/#3-deploy-the-cluster","title":"3. Deploy the cluster","text":"Run k0sctl apply
to perform the cluster deployment:
k0sctl apply --config k0sctl.yaml\n
\u2800\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\n\nINFO k0sctl 0.0.0 Copyright 2021, Mirantis Inc.\nINFO Anonymized telemetry will be sent to Mirantis.\nINFO By continuing to use k0sctl you agree to these terms:\nINFO https://k0sproject.io/licenses/eula\nINFO ==> Running phase: Connect to hosts\nINFO [ssh] 10.0.0.1:22: connected\nINFO [ssh] 10.0.0.2:22: connected\nINFO ==> Running phase: Detect host operating systems\nINFO [ssh] 10.0.0.1:22: is running Ubuntu 20.10\nINFO [ssh] 10.0.0.2:22: is running Ubuntu 20.10\nINFO ==> Running phase: Prepare hosts\nINFO [ssh] 10.0.0.1:22: installing kubectl\nINFO ==> Running phase: Gather host facts\nINFO [ssh] 10.0.0.1:22: discovered 10.12.18.133 as private address\nINFO ==> Running phase: Validate hosts\nINFO ==> Running phase: Gather k0s facts\nINFO ==> Running phase: Download K0s on the hosts\nINFO [ssh] 10.0.0.2:22: downloading k0s 0.11.0\nINFO [ssh] 10.0.0.1:22: downloading k0s 0.11.0\nINFO ==> Running phase: Configure K0s\nWARN [ssh] 10.0.0.1:22: generating default configuration\nINFO [ssh] 10.0.0.1:22: validating configuration\nINFO [ssh] 10.0.0.1:22: configuration was changed\nINFO ==> Running phase: Initialize K0s Cluster\nINFO [ssh] 10.0.0.1:22: installing k0s controller\nINFO [ssh] 10.0.0.1:22: waiting for the k0s service to start\nINFO [ssh] 10.0.0.1:22: waiting for kubernetes api to respond\nINFO ==> Running phase: Install workers\nINFO [ssh] 10.0.0.1:22: generating token\nINFO [ssh] 10.0.0.2:22: writing join token\nINFO [ssh] 10.0.0.2:22: installing k0s worker\nINFO [ssh] 10.0.0.2:22: starting service\nINFO [ssh] 10.0.0.2:22: waiting for node to become ready\nINFO ==> Running phase: Disconnect from hosts\nINFO ==> Finished in 2m2s\nINFO k0s cluster version 0.11.0 is now installed\nINFO Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO k0sctl kubeconfig\n
"},{"location":"k0sctl-install/#4-access-the-cluster","title":"4. Access the cluster","text":"To access your k0s cluster, use k0sctl to generate a kubeconfig
for the purpose.
k0sctl kubeconfig > kubeconfig\n
With the kubeconfig
, you can access your cluster using either kubectl or Lens.
kubectl get pods --kubeconfig kubeconfig -A\n
NAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system calico-kube-controllers-5f6546844f-w8x27 1/1 Running 0 3m50s\nkube-system calico-node-vd7lx 1/1 Running 0 3m44s\nkube-system coredns-5c98d7d4d8-tmrwv 1/1 Running 0 4m10s\nkube-system konnectivity-agent-d9xv2 1/1 Running 0 3m31s\nkube-system kube-proxy-xp9r9 1/1 Running 0 4m4s\nkube-system metrics-server-6fbcd86f7b-5frtn 1/1 Running 0 3m51s\n
"},{"location":"k0sctl-install/#known-limitations","title":"Known limitations","text":"Included with k0s, Manifest Deployer is one of two methods you can use to run k0s with your preferred extensions (the other being by defining your extensions as Helm charts).
"},{"location":"manifests/#overview","title":"Overview","text":"Manifest Deployer runs on the controller nodes and provides an easy way to automatically deploy manifests at runtime.
By default, k0s reads all manifests under /var/lib/k0s/manifests
and ensures that their state matches the cluster state. Moreover, on removal of a manifest file, k0s will automatically prune all of its associated resources.
The use of Manifest Deployer is quite similar to the use of the kubectl apply
command. The main difference between the two is that Manifest Deployer constantly monitors the directory for changes, and thus you do not need to manually apply changes that are made to the manifest files.
Each directory that is a direct descendant of /var/lib/k0s/manifests
is considered to be its own \"stack\". Nested directories (further subfolders), however, are excluded from the stack mechanism and thus are not automatically deployed by the Manifest Deployer. To try Manifest Deployer, create a new folder under /var/lib/k0s/manifests
and then create a manifest file (such as nginx.yaml
) with the following content:
apiVersion: v1\nkind: Namespace\nmetadata:\nname: nginx\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx-deployment\nnamespace: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nreplicas: 3\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- name: nginx\nimage: nginx:latest\nports:\n- containerPort: 80\n
New pods will appear soon thereafter.
sudo k0s kubectl get pods --namespace nginx\n
NAME READY STATUS RESTARTS AGE\nnginx-deployment-66b6c48dd5-8zq7d 1/1 Running 0 10m\nnginx-deployment-66b6c48dd5-br4jv 1/1 Running 0 10m\nnginx-deployment-66b6c48dd5-sqvhb 1/1 Running 0 10m\n
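Because Manifest Deployer also prunes resources when their manifest is removed, deleting the file cleans everything up again (a sketch, assuming the folder created above was named nginx):
sudo rm /var/lib/k0s/manifests/nginx/nginx.yaml\nsudo k0s kubectl get namespaces   # the nginx namespace and its deployment are pruned shortly afterwards\n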
"},{"location":"networking/","title":"Networking","text":""},{"location":"networking/#in-cluster-networking","title":"In-cluster networking","text":"k0s supports two Container Network Interface (CNI) providers out-of-box, Kube-router and Calico. In addition, k0s can support your own CNI configuration.
"},{"location":"networking/#notes","title":"Notes","text":"Kube-router is built into k0s, and so by default the distribution uses it for network provision. Kube-router uses the standard Linux networking stack and toolset, and you can set up CNI networking without any overlays by using BGP as the main mechanism for in-cluster networking.
In addition to Kube-router, k0s also offers Calico as an alternative, built-in network provider. Calico is a layer 3 container networking solution that routes packets to pods. It supports, for example, pod-specific network policies that help to secure Kubernetes clusters in demanding use cases. Calico uses the VXLAN overlay network by default, and you can configure it to support IP-in-IP (ipip).
You can opt-out of having k0s manage the network setup and choose instead to use any network plugin that adheres to the CNI specification. To do so, configure custom
as the network provider in the k0s configuration file (k0s.yaml
). You can do this, for example, by pushing network provider manifests into /var/lib/k0s/manifests
, from where k0s controllers will collect them for deployment into the cluster (for more information, refer to Manifest Deployer).
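A minimal sketch of that opt-out in the configuration file (assuming the default file location; the provider field accepts custom for this purpose):
cat <<'EOF' | sudo tee /etc/k0s/k0s.yaml\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n  network:\n    provider: custom\nEOF\n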
One goal of k0s is to allow for the deployment of an isolated control plane, which may prevent the establishment of an IP route between controller nodes and the pod network. Thus, to enable this communication path (which is mandated by conformance tests), k0s deploys Konnectivity service to proxy traffic from the API server (control plane) into the worker nodes. This ensures that we can always fulfill all the Kubernetes API functionalities, but still operate the control plane in total isolation from the workers.
Note: To allow Konnectivity agents running on the worker nodes to establish the connection, configure your firewalls for outbound access, port 8132. Moreover, configure your firewalls for outbound access, port 6443, in order to access Kube-API from the worker nodes.
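On the controller side, the corresponding inbound ports can be opened, for example, with firewalld (a sketch; adapt it to your firewall tooling):
sudo firewall-cmd --permanent --add-port=6443/tcp --add-port=8132/tcp\nsudo firewall-cmd --reload\n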
"},{"location":"networking/#required-ports-and-protocols","title":"Required ports and protocols","text":"Protocol Port Service Direction Notes TCP 2380 etcd peers controller <-> controller TCP 6443 kube-apiserver Worker, CLI => controller Authenticated Kube API using Kube TLS client certs, ServiceAccount tokens with RBAC TCP 179 kube-router worker <-> worker BGP routing sessions between peers UDP 4789 Calico worker <-> worker Calico VXLAN overlay TCP 10250 kubelet Master, Worker => Host*
Authenticated kubelet API for the master node kube-apiserver
(and heapster
/metrics-server
addons) using TLS client certs TCP 9443 k0s-api controller <-> controller k0s controller join API, TLS with token auth TCP 8132 konnectivity worker <-> controller Konnectivity is used as \"reverse\" tunnel between kube-apiserver and worker kubelets"},{"location":"networking/#iptables","title":"iptables","text":"iptables
can work in two distinct modes, legacy
and nftables
. k0s autodetects the mode and prefers nftables
. To check which mode k0s is configured with, check ls -lah /var/lib/k0s/bin/
. The iptables
link target reveals the mode which k0s selected. k0s uses the same detection logic as other Kubernetes components, but to ensure that all components have picked up the same mode, you can check via: kube-proxy: nsenter -t $(pidof kube-proxy) -m iptables -V
kube-router: nsenter -t $(pidof kube-router) -m /sbin/iptables -V
calico: nsenter -t $(pidof -s calico-node) -m iptables -V
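To compare that with the mode k0s itself selected, inspect the shipped binary and the host tooling (a sketch; the exact link target name can vary between versions):
readlink -f /var/lib/k0s/bin/iptables   # the link target indicates the mode k0s picked\niptables --version                      # host tooling reports (nf_tables) or (legacy)\n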
There are known incompatibility issues between different iptables versions. k0s ships (in /var/lib/k0s/bin
) a version of iptables that is tested to interoperate with all other Kubernetes components it ships with. However, if you have other tooling (firewalls etc.) on your hosts that uses iptables, and the host iptables version differs from the one that k0s (and the other Kubernetes components) ships with, it may cause networking issues. This is because iptables, being user-space tooling, does not provide any strong version compatibility guarantees.
If you are using firewalld
on your hosts you need to ensure it is configured to use the same FirewallBackend
as k0s and other Kubernetes components use. Otherwise networking will be broken in various ways.
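A quick way to check which backend firewalld is using (a sketch; the value should match the mode k0s detected):
grep '^FirewallBackend' /etc/firewalld/firewalld.conf\n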
Here's an example configuration for a tested working networking setup:
[root@rhel-test ~]# firewall-cmd --list-all\npublic (active)\ntarget: default\n icmp-block-inversion: no\n interfaces: eth0\n sources: 10.244.0.0/16 10.96.0.0/12\n services: cockpit dhcpv6-client ssh\n ports: 80/tcp 6443/tcp 8132/tcp 10250/tcp 179/tcp 179/udp\n protocols: forward: no\n masquerade: yes\n forward-ports: source-ports: icmp-blocks: rich rules:\n
"},{"location":"nllb/","title":"Node-local load balancing","text":"Note: This feature is experimental! Expect instabilities and/or breaking changes.
For clusters that don't have an externally managed load balancer for the k0s control plane, there is another option to get a highly available control plane, at least from within the cluster. K0s calls this \"node-local load balancing\". In contrast to an externally managed load balancer, node-local load balancing takes place exclusively on the worker nodes. It does not contribute to making the control plane highly available to the outside world (e.g. humans interacting with the cluster using management tools such as Lens or kubectl
), but rather makes the cluster itself internally resilient to controller node outages.
The k0s worker process manages a load balancer on each worker node's loopback interface and configures the relevant components to use that load balancer. This allows for requests from worker components to the control plane to be distributed among all currently available controller nodes, rather than being directed to the controller node that has been used to join a particular worker into the cluster. This improves the reliability and fault tolerance of the cluster in case a controller node becomes unhealthy.
Envoy is the only load balancer that is supported so far. Please note that Envoy is not available on ARMv7, so node-local load balancing is currently unavailable on that platform.
"},{"location":"nllb/#enabling-in-a-cluster","title":"Enabling in a cluster","text":"In order to use node-local load balancing, the cluster needs to comply with the following:
spec.api.externalAddress
.spec.api.tunneledNetworkingMode
as true
.--single
flag.Add the following to the cluster configuration (k0s.yaml
):
spec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n
Or alternatively, if using k0sctl
, add the following to the k0sctl configuration (k0sctl.yaml
):
spec:\nk0s:\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n
All newly added worker nodes will then use node-local load balancing. The k0s worker process on worker nodes that are already running must be restarted for the new configuration to take effect.
"},{"location":"nllb/#full-example-using-k0sctl","title":"Full example usingk0sctl
","text":"The following example shows a full k0sctl
configuration file featuring three controllers and two workers with node-local load balancing enabled:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: v1.27.5+k0s.0\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\nhosts:\n- role: controller\nssh:\naddress: 10.81.146.254\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.184\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.113\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.198\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.51\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n
Save the above configuration into a file called k0sctl.yaml
and apply it in order to bootstrap the cluster:
$ k0sctl apply\n\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\nk0sctl 0.15.0 Copyright 2022, k0sctl authors.\nBy continuing to use k0sctl you agree to these terms:\nhttps://k0sproject.io/licenses/eula\nlevel=info msg=\"==> Running phase: Connect to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: connected\"\nlevel=info msg=\"==> Running phase: Detect host operating systems\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"==> Running phase: Acquire exclusive host lock\"\nlevel=info msg=\"==> Running phase: Prepare hosts\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing packages (curl)\"\nlevel=info msg=\"==> Running phase: Gather host facts\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: using k0s-controller-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: using k0s-worker-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: using k0s-worker-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: using k0s-controller-2 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: using k0s-controller-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: discovered eth0 as private interface\"\nlevel=info 
msg=\"[ssh] 10.81.146.198:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: discovered eth0 as private interface\"\nlevel=info msg=\"==> Running phase: Download k0s binaries to local host\"\nlevel=info msg=\"==> Running phase: Validate hosts\"\nlevel=info msg=\"==> Running phase: Gather k0s facts\"\nlevel=info msg=\"==> Running phase: Validate facts\"\nlevel=info msg=\"==> Running phase: Upload k0s binaries to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.27.5+k0s.0\"\nlevel=info msg=\"==> Running phase: Configure k0s\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: configuration was changed\"\nlevel=info msg=\"==> Running phase: Initialize the k0s cluster\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install controllers\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install workers\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: starting 
service\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: waiting for node to become ready\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: waiting for node to become ready\"\nlevel=info msg=\"==> Running phase: Release exclusive host lock\"\nlevel=info msg=\"==> Running phase: Disconnect from hosts\"\nlevel=info msg=\"==> Finished in 3m30s\"\nlevel=info msg=\"k0s cluster version v1.27.5+k0s.0 is now installed\"\nlevel=info msg=\"Tip: To access the cluster you can now fetch the admin kubeconfig using:\"\nlevel=info msg=\" k0sctl kubeconfig\"\n
The cluster with the two nodes should be available by now. Setup the kubeconfig file in order to interact with it:
k0sctl kubeconfig > k0s-kubeconfig\nexport KUBECONFIG=$(pwd)/k0s-kubeconfig\n
The three controllers are available and provide API Server endpoints:
$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME HOLDER AGE\nk0s-ctrl-k0s-controller-0 9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1 2m37s\nk0s-ctrl-k0s-controller-1 fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97 2m18s\nk0s-ctrl-k0s-controller-2 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 2m9s\nk0s-endpoint-reconciler 9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1 2m37s\n\n$ kubectl -n default get endpoints\nNAME ENDPOINTS AGE\nkubernetes 10.81.146.113:6443,10.81.146.184:6443,10.81.146.254:6443 2m49s\n
The first controller is the current k0s leader. The two worker nodes can be listed, too:
$ kubectl get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-worker-0 Ready <none> 2m16s v1.27.5+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\nk0s-worker-1 Ready <none> 2m15s v1.27.5+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\n
There is one node-local load balancer pod running for each worker node:
$ kubectl -n kube-system get pod -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnllb-k0s-worker-0 1/1 Running 0 81s 10.81.146.198 k0s-worker-0 <none> <none>\nnllb-k0s-worker-1 1/1 Running 0 85s 10.81.146.51 k0s-worker-1 <none> <none>\n
The cluster is using node-local load balancing and is able to tolerate the outage of one controller node. Shutdown the first controller to simulate a failure condition:
$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.254 'echo \"Powering off $(hostname) ...\" && sudo poweroff'\nPowering off k0s-controller-0 ...\n
Node-local load balancing provides high availability from within the cluster, not from the outside. The generated kubeconfig file lists the first controller's IP as the Kubernetes API server address by default. As this controller is gone by now, a subsequent call to kubectl
will fail:
$ kubectl get nodes\nUnable to connect to the server: dial tcp 10.81.146.254:6443: connect: no route to host\n
Changing the server address in k0s-kubeconfig
from the first controller to another one makes the cluster accessible again. Pick one of the other controller IP addresses and put that into the kubeconfig file. The addresses are listed both in k0sctl.yaml
as well as in the output of kubectl -n default get endpoints
above.
$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.184 hostname\nk0s-controller-1\n\n$ sed -i s#https://10\\\\.81\\\\.146\\\\.254:6443#https://10.81.146.184:6443#g k0s-kubeconfig\n\n$ kubectl get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-worker-0 Ready <none> 3m35s v1.27.5+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\nk0s-worker-1 Ready <none> 3m34s v1.27.5+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\n\n$ kubectl -n kube-system get pods -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnllb-k0s-worker-0 1/1 Running 0 2m31s 10.81.146.198 k0s-worker-0 <none> <none>\nnllb-k0s-worker-1 1/1 Running 0 2m35s 10.81.146.51 k0s-worker-1 <none> <none>\n
The first controller is no longer active. Its IP address is not listed in the default/kubernetes
Endpoints resource and its k0s controller lease is orphaned:
$ kubectl -n default get endpoints\nNAME ENDPOINTS AGE\nkubernetes 10.81.146.113:6443,10.81.146.184:6443 3m56s\n\n$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME HOLDER AGE\nk0s-ctrl-k0s-controller-0 4m47s\nk0s-ctrl-k0s-controller-1 fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97 4m28s\nk0s-ctrl-k0s-controller-2 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 4m19s\nk0s-endpoint-reconciler 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 4m47s\n
Despite that controller being unavailable, the cluster remains operational. The third controller has become the new k0s leader. Workloads will run just fine:
$ kubectl -n default run nginx --image=nginx\npod/nginx created\n\n$ kubectl -n default get pods -owide\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnginx 1/1 Running 0 16s 10.244.0.5 k0s-worker-1 <none> <none>\n\n$ kubectl -n default logs nginx\n/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration\n/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh\n10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf\n10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh\n/docker-entrypoint.sh: Configuration complete; ready for start up\n[notice] 1#1: using the \"epoll\" event method\n[notice] 1#1: nginx/1.23.3\n[notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)\n[notice] 1#1: OS: Linux 5.15.83-0-virt\n[notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576\n[notice] 1#1: start worker processes\n[notice] 1#1: start worker process 28\n
"},{"location":"podsecurity/","title":"Pod Security Standards","text":"Since Pod Security Policies have been removed in Kubernetes v1.25, Kubernetes offers Pod Security Standards \u2013 a new way to enhance cluster security.
To enable PSS in k0s you need to create an admission controller config file:
```yaml\napiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: PodSecurity\n configuration:\n apiVersion: pod-security.admission.config.k8s.io/v1beta1\n kind: PodSecurityConfiguration\n # Defaults applied when a mode label is not set.\n defaults:\n enforce: \"privileged\"\n enforce-version: \"latest\"\n exemptions:\n # Don't forget to exempt namespaces or users that are responsible for deploying\n # cluster components, because they need to run privileged containers\n usernames: [\"admin\"]\n namespaces: [\"kube-system\"]\n```\n
Add these extra arguments to the k0s configuration:
```yaml\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n api:\n extraArgs:\n admission-control-config-file: /path/to/admission/control/config.yaml\n```\n
"},{"location":"raspberry-pi4/","title":"Create a Raspberry Pi 4 cluster","text":""},{"location":"raspberry-pi4/#prerequisites","title":"Prerequisites","text":"This guide assumes that you use a Raspberry Pi 4 Model B computer and a sufficiently large SD card of at least 32 GB. We will be using Ubuntu Linux for this guide, although k0s should run quite fine on other 64-bit Linux distributions for the Raspberry Pi as well. Please file a Bug if you encounter any obstacles.
"},{"location":"raspberry-pi4/#set-up-the-system","title":"Set up the system","text":""},{"location":"raspberry-pi4/#prepare-sd-card-and-boot-up-the-raspberry-pi","title":"Prepare SD card and boot up the Raspberry Pi","text":"Install Ubuntu Server 22.04.1 LTS 64-bit for Raspberry Pi. Ubuntu provides a step by step guide for the installation process. They use Raspberry Pi Imager, a specialized imaging utility that you can use to write the Ubuntu image, amongst others, to your SD cards. Follow that guide to get a working installation. (You can skip part 5 of the guide, since we won't need a Desktop Environment to run k0s.)
Alternatively, you can also opt to download the Ubuntu server image for Raspberry Pi manually and write it to an SD card using a tool like dd
:
wget https://cdimage.ubuntu.com/releases/22.04.1/release/ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\nunxz ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\ndd if=ubuntu-22.04.1-preinstalled-server-arm64+raspi.img of=/dev/mmcblk0 bs=4M status=progress\n
Note: The manual process is more prone to accidental data loss than the guided one via Raspberry Pi Imager. Be sure to choose the correct device names. The previous content of the SD card will be wiped. Moreover, the partition written to the SD card needs to be resized to make the full capacity of the card available to Ubuntu. This can be achieved, for example, in this way:
growpart /dev/mmcblk0 2\nresize2fs /dev/mmcblk0p2\n
Ubuntu uses cloud-init to allow for automated customizations of the system configuration. The cloud-init configuration files are located on the boot partition of the SD card. You can mount that partition and modify those, e.g. to provision network configuration, users, authorized SSH keys, additional packages and also an automatic installation of k0s.
After you have prepared the SD card, plug it into the Raspberry Pi and boot it up. Once cloud-init finished bootstrapping the system, the default login credentials are set to user ubuntu
with password ubuntu
(which you will be prompted to change on first login).
Note: For network configuration purposes, this documentation assumes that all of your computers are connected on the same subnet.
Review k0s's required ports and protocols to ensure that your network and firewall configurations allow necessary traffic for the cluster.
Review the Ubuntu Server Networking Configuration documentation to ensure that all systems have a static IP address on the network, or that the network is providing a static DHCP lease for the nodes. If the network should be managed via cloud-init, please refer to their documentation.
"},{"location":"raspberry-pi4/#optional-provision-ssh-keys","title":"(Optional) Provision SSH keys","text":"Ubuntu Server deploys and enables OpenSSH via cloud-init by default. Confirm, though, that for whichever user you will deploy the cluster with on the build system, their SSH Key is copied to each node's root user. Before you start, the configuration should be such that the current user can run:
ssh root@${HOST}\n
Where ${HOST}
is any node and the login can succeed with no further prompts.
While having a swap file is technically optional, it can help to ease memory pressure when running memory intensive workloads or on Raspberry Pis with less than 8 GB of RAM.
To create a swap file:
fallocate -l 2G /swapfile && \\\nchmod 0600 /swapfile && \\\nmkswap /swapfile && \\\nswapon -a\n
Ensure that the usage of swap is not too aggressive by setting the sudo sysctl vm.swappiness=10
(the default is generally higher) and configuring it to be persistent in /etc/sysctl.d/*
.
Ensure that your swap is mounted after reboots by confirming that the following line exists in your /etc/fstab
configuration:
/swapfile none swap sw 0 0\n
Download a k0s release. For example:
wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.27.5+k0s.0/k0s-v1.27.5+k0s.0-arm64 # replace version number!\nsudo install /tmp/k0s /usr/local/bin/k0s\n
\u2015 or \u2015
Use the k0s download script (as one command) to download the latest stable k0s and make it executable in /usr/bin/k0s
.
curl -sSLf https://get.k0s.sh | sudo sh\n
At this point you can run k0s
:
ubuntu@ubuntu:~$ k0s version\nv1.27.5+k0s.0\n
To check if k0s's system requirements and external runtime dependencies are fulfilled by your current setup, you can invoke k0s sysinfo
:
ubuntu@ubuntu:~$ k0s sysinfo\nMachine ID: \"d84cde1f38844d1425dc04c454c5aa95e41fb11115bbb141c016f4cd3dea4f51\" (from machine) (pass)\nTotal memory: 3.7 GiB (pass)\nDisk space available for /var/lib/k0s: 24.3 GiB (pass)\nOperating system: Linux (pass)\n Linux kernel release: 5.15.0-1013-raspi (pass)\n Max. file descriptors per process: current: 1024 / max: 1048576 (warning: < 65536)\n Executable in path: modprobe: /usr/sbin/modprobe (pass)\n /proc file system: mounted (0x9fa0) (pass)\n Control Groups: version 2 (pass)\n cgroup controller \"cpu\": available (pass)\n cgroup controller \"cpuacct\": available (via cpu in version 2) (pass)\n cgroup controller \"cpuset\": available (pass)\n cgroup controller \"memory\": available (pass)\n cgroup controller \"devices\": available (assumed) (pass)\n cgroup controller \"freezer\": available (assumed) (pass)\n cgroup controller \"pids\": available (pass)\n cgroup controller \"hugetlb\": available (pass)\n cgroup controller \"blkio\": available (via io in version 2) (pass)\n CONFIG_CGROUPS: Control Group support: built-in (pass)\n CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem: built-in (pass)\n CONFIG_CGROUP_PIDS: PIDs cgroup subsystem: built-in (pass)\n CONFIG_CGROUP_DEVICE: Device controller for cgroups: built-in (pass)\n CONFIG_CPUSETS: Cpuset support: built-in (pass)\n CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem: built-in (pass)\n CONFIG_MEMCG: Memory Resource Controller for Control Groups: built-in (pass)\n CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups: built-in (pass)\n CONFIG_CGROUP_SCHED: Group CPU scheduler: built-in (pass)\n CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER: built-in (pass)\n CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED: built-in (pass)\n CONFIG_BLK_CGROUP: Block IO controller: built-in (pass)\n CONFIG_NAMESPACES: Namespaces support: built-in (pass)\n CONFIG_UTS_NS: UTS namespace: built-in (pass)\n CONFIG_IPC_NS: IPC namespace: built-in (pass)\n CONFIG_PID_NS: PID namespace: built-in (pass)\n CONFIG_NET_NS: Network namespace: built-in (pass)\n CONFIG_NET: Networking support: built-in (pass)\n CONFIG_INET: TCP/IP networking: built-in (pass)\n CONFIG_IPV6: The IPv6 protocol: built-in (pass)\n CONFIG_NETFILTER: Network packet filtering framework (Netfilter): built-in (pass)\n CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration: built-in (pass)\n CONFIG_NETFILTER_XTABLES: Netfilter Xtables support: module (pass)\n CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support: module (pass)\n CONFIG_NETFILTER_XT_MARK: nfmark target and match support: module (pass)\n CONFIG_NETFILTER_XT_SET: set target and match support: module (pass)\n CONFIG_NETFILTER_XT_TARGET_MASQUERADE: MASQUERADE target support: module (pass)\n CONFIG_NETFILTER_XT_NAT: \"SNAT and DNAT\" targets support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: \"addrtype\" address type match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_CONNTRACK: \"conntrack\" connection tracking match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_MULTIPORT: \"multiport\" Multiple port match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_RECENT: \"recent\" match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_STATISTIC: \"statistic\" match support: module (pass)\n CONFIG_NETFILTER_NETLINK: module (pass)\n CONFIG_NF_CONNTRACK: Netfilter connection tracking support: module (pass)\n CONFIG_NF_NAT: module 
(pass)\n CONFIG_IP_SET: IP set support: module (pass)\n CONFIG_IP_SET_HASH_IP: hash:ip set support: module (pass)\n CONFIG_IP_SET_HASH_NET: hash:net set support: module (pass)\n CONFIG_IP_VS: IP virtual server support: module (pass)\n CONFIG_IP_VS_NFCT: Netfilter connection tracking: built-in (pass)\n CONFIG_NF_CONNTRACK_IPV4: IPv4 connetion tracking support (required for NAT): unknown (warning)\n CONFIG_NF_REJECT_IPV4: IPv4 packet rejection: module (pass)\n CONFIG_NF_NAT_IPV4: IPv4 NAT: unknown (warning)\n CONFIG_IP_NF_IPTABLES: IP tables support: module (pass)\n CONFIG_IP_NF_FILTER: Packet filtering: module (pass)\n CONFIG_IP_NF_TARGET_REJECT: REJECT target support: module (pass)\n CONFIG_IP_NF_NAT: iptables NAT support: module (pass)\n CONFIG_IP_NF_MANGLE: Packet mangling: module (pass)\n CONFIG_NF_DEFRAG_IPV4: module (pass)\n CONFIG_NF_CONNTRACK_IPV6: IPv6 connetion tracking support (required for NAT): unknown (warning)\n CONFIG_NF_NAT_IPV6: IPv6 NAT: unknown (warning)\n CONFIG_IP6_NF_IPTABLES: IP6 tables support: module (pass)\n CONFIG_IP6_NF_FILTER: Packet filtering: module (pass)\n CONFIG_IP6_NF_MANGLE: Packet mangling: module (pass)\n CONFIG_IP6_NF_NAT: ip6tables NAT support: module (pass)\n CONFIG_NF_DEFRAG_IPV6: module (pass)\n CONFIG_BRIDGE: 802.1d Ethernet Bridging: module (pass)\n CONFIG_LLC: module (pass)\n CONFIG_STP: module (pass)\n CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem: built-in (pass)\n CONFIG_PROC_FS: /proc file system support: built-in (pass)\n
"},{"location":"raspberry-pi4/#deploy-a-node","title":"Deploy a node","text":"Each node can now serve as a control plane node or worker node or both.
"},{"location":"raspberry-pi4/#as-single-node","title":"As single node","text":"This is a self-contained single node setup which runs both control plane components and worker components. If you don't plan join any more nodes into the cluster, this is for you.
Install the k0scontroller
service:
ubuntu@ubuntu:~$ sudo k0s install controller --single\nubuntu@ubuntu:~$ sudo systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start it:
ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 09:56:02 UTC; 2s ago\n Docs: https://docs.k0sproject.io\n Main PID: 2720 (k0s)\n Tasks: 10\n Memory: 24.7M\n CPU: 4.654s\n CGroup: /system.slice/k0scontroller.service\n \u2514\u25002720 /usr/local/bin/k0s controller --single=true\n\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 6275509116227039894094374442676315636193163621\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 336800507542010809697469355930007636411790073226\n
When the cluster is up, try to have a look:
ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nubuntu Ready control-plane 4m41s v1.27.5+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system kube-proxy-kkv2l 1/1 Running 0 4m44s 10.152.56.54 ubuntu <none> <none>\nkube-system kube-router-vf2pv 1/1 Running 0 4m44s 10.152.56.54 ubuntu <none> <none>\nkube-system coredns-88b745646-wd4mp 1/1 Running 0 5m10s 10.244.0.2 ubuntu <none> <none>\nkube-system metrics-server-7d7c4887f4-ssk49 1/1 Running 0 5m6s 10.244.0.3 ubuntu <none> <none>\n
Overall, the single k0s node uses less than 1 GiB of RAM:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 715Mi 1.3Gi 3.0Mi 1.7Gi 2.8Gi\nSwap: 0B 0B 0B\n
"},{"location":"raspberry-pi4/#as-a-controller-node","title":"As a controller node","text":"This will install k0s as a single non-HA controller. It won't be able to run any workloads, so you need to connect more workers to it.
Install the k0scontroller
service. Note that we're not specifying any flags:
ubuntu@ubuntu:~$ sudo k0s install controller\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start it:
ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 10:31:07 UTC; 3s ago\n Docs: https://docs.k0sproject.io\n Main PID: 1176 (k0s)\n Tasks: 10\n Memory: 30.2M\n CPU: 8.936s\n CGroup: /system.slice/k0scontroller.service\n \u2514\u25001176 /usr/local/bin/k0s controller\n\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 723202396395786987172578079268287418983457689579\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 36297085497443583023060005045470362249819432477\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 728910847354665355109188021924183608444435075827\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generate received request\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] received CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generating key: rsa-2048\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 718948898553094584370065610752227487244528071083\n
As soon as the controller is up, we can try to inspect the API as we did for the single node:
ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNo resources found\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system coredns-88b745646-6tpwm 0/1 Pending 0 29s <none> <none> <none> <none>\nkube-system metrics-server-7d7c4887f4-9k5k5 0/1 Pending 0 24s <none> <none> <none> <none>\n
As we see, there are no nodes and two pending pods. A control plane without workers. The memory consumption is below the single node controller, but not much:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 678Mi 2.3Gi 3.0Mi 758Mi 2.9Gi\nSwap: 0B 0B 0B\n
This controller runs a full-fledged control plane, backed by etcd, as opposed to the lightweight kine based one from the single node example. For the latter, k0s doesn't support joining new nodes.
More nodes can be added by creating join tokens. To add a worker node, create a token for it:
ubuntu@ubuntu:~$ sudo k0s token create --role worker\nH4sIAAAAAAAC/2yV0Y6jPBKF7/MUeYGZ30DonUTai5+Ak5DgbhuXHXwHmAnBhtAJHdKs9t1XnZmRdqW9K1cdfceyrDqzvD+L6no7X7rV/O7MSvtxG6rrbTX7Nv9dr2bz+Xx+q6736rqa18PQ31Z//eWg747vfvdfvvuL1cti4T1VZXUdzj/PZT5U3/KPob5cz8PnN50P+Wp+SNFwSJ01Ax3zcxAyEUMKKqYIA3vO0LA2TpwCC1hEQipFrxD2UogDhawQobWJY297jxHBCdbS70hIvWKTOMWGBcwhgUaMSegPhdPH+VY13GDGYNxTiwONdMSEJtTiLeVYMMALDn6dOKqXtt5r0WfQPpqK43cpWKBAecnWktxEiAvWVZEDghPCorhmXTlWp/7PTPz3jEPcVZF6p0KsFfIlNZiIiB11iFUhlJ+1jkxwn/EjU4kRnnI1zsEJkkiH4OHt2pI4a0gEINZUYEEhQinEkUb4qU0Rvn+9CQD5UKJ0dKfG1NVZ2dWCcfCkHFDKycjbYZuGIsk5DngY7Svcn3N5mdIGm1yylkU+Srcxyiy7l50ZRUTvGqtcNuK9QAvEjcihu4yJh/sipC5xy4nBssut9UrcB6nENz72JnfxKLBmxAseZftgyhHvfLIjaeK+PNYX2tmwkKQrGjPlSFAI2VRKmyZmidjnsGCefRfe6Vp4p6veBk0FCtaN/uBu7JAp9kS6nFKDCQvxVUXYsGPiFji+VU05UtFvdLt8oVK8JRE+5m6fZfbvBcGa8QhH0pzG6vxjLEOSEJvtZdRvhNSywNmCejEihiRMYp/IH34utZc6GpdwWwgbc9Hhh5Q+4ushLeXJEZ6t85YBCLxTTfwmGhyWW+HC2B+AE1DnYdK4l9pYJ/P0jhn1mrsq1MbHKYqcRO6cyuAQQG/kRlsq2aOK/HVp2FZKDVRqQg0OmNuz3MTB2jgBiXSQCGHYVmN6XnoAItDIrmnbBxDFHbdqB8ZZU5ktGMRAgQUApzuH3chQ9BCSRcrBR2riVCHxBt5ln3kYlXKxKKI6JEizV4wn3tWyMMk1N/iVtvpayvqaQ+nrKfj6gxMzOOCIBF/+cBQv4JG4AnATe0GZjUNy6gcWkkG5CJGpntKGTnzb472XfeqtekuQzqsWua+bpaw2j9d0ih02YZauh5y4/v7gqZzY2lYmVuWkahFqzF0cri1jbPu3n4d6nVp10G4fVw3OZbp8VabfaQfvtWN9zYNOdfVYmIWjz4PMzOOFmv5Nb3u39CgqXdUCth4xyxrwaQ8Oc3On9xIet3mHmewCj7kJgmP/pr3os5i0oLx+1+4yyj1mcwuTmDIko50DpndhWwNxHwcQQSuEGFljI0Z7lYJ1EhgnguJ3PukPYXr3VbJYOCdE5ECSFpBqgrDEpzFzRSfFxSUgIrJhUQZxW5jazxpCk445CfK3RMbHdcOGtL2N0O7uAuyCId8A0izZ4B2EseQb55EgwVX7+CyjmB9c1eSTVQXeLWiDj4CjUW7ZXXl9nR7pqDYKUXnZqyZ4r46x98bR/vduxtzQE0UiFZHdpEACEcFzLx/o5Z+z+bzL22o1N+g2Ky/dUD2GXznxq/6VE39C46n6anzcnqePorLV8K24XIbbcM37/6V9XK9VN3z7Q3o2zbnTq/n60v08n2b9tfpZXauurG6r+b/+PfuiPs1/Q/4P/mn8vMJwMVW3mrvL84/lj+8N8ia/uZ/Lf2izWFb57D8BAAD//zANvmsEBwAA\n
Save the join token for subsequent steps.
"},{"location":"raspberry-pi4/#as-a-worker-node","title":"As a worker node","text":"To join an existing k0s cluster, create the join token file for the worker (where $TOKEN_CONTENT
is one of the join tokens created in the control plane setup):
sudo sh -c 'mkdir -p /var/lib/k0s/ && umask 077 && echo \"$TOKEN_CONTENT\" > /var/lib/k0s/join-token'\n
After that, install the k0sworker
service:
ubuntu@ubuntu:~$ sudo k0s install worker --token-file /var/lib/k0s/join-token\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cb k0sworker.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start the service:
ubuntu@ubuntu:~$ sudo systemctl start k0sworker.service\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cf k0sworker.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 13:48:58 UTC; 2s ago\n Docs: https://docs.k0sproject.io\n Main PID: 1631 (k0s)\n Tasks: 22\n Memory: 181.7M\n CPU: 4.010s\n CGroup: /system.slice/k0sworker.service\n \u251c\u25001631 /usr/local/bin/k0s worker --token-file=/var/lib/k0s/join-token\n \u2514\u25001643 /var/lib/k0s/bin/containerd --root=/var/lib/k0s/containerd --state=/run/k0s/containerd --address=/run/k0s/containerd.sock --log-level=info --config=/etc/k0s/containerd.toml\n\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1643\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting OCIBundleReconciler\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: /run/systemd/resolve/resolv.conf\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1648\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Status\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Autopilot\"\n
As this is a worker node, we cannot access the Kubernetes API via the builtin k0s kc
subcommand, but we can check the k0s API instead:
ubuntu@ubuntu:~$ sudo k0s status\nVersion: v1.27.5+k0s.0\nProcess ID: 1631\nRole: worker\nWorkloads: true\nSingleNode: false\n
The memory requirements are also pretty low:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 336Mi 2.1Gi 3.0Mi 1.2Gi 3.2Gi\nSwap: 0B 0B 0B\n
"},{"location":"raspberry-pi4/#connect-to-the-cluster","title":"Connect to the cluster","text":"On a controller node, generate a new raspi-cluster-master
user with admin rights and get a kubeconfig for it:
ubuntu@ubuntu:~$ sudo k0s kc create clusterrolebinding raspi-cluster-master-admin --clusterrole=admin --user=raspi-cluster-master\nclusterrolebinding.rbac.authorization.k8s.io/raspi-cluster-master-admin created\nubuntu@ubuntu:~$ sudo k0s kubeconfig create --groups system:masters raspi-cluster-master\n\napiVersion: v1\nclusters:\n- cluster:\n server: https://10.152.56.54:6443\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBRENDQWVpZ0F3SUJBZ0lVT2RSVzdWdm83UWR5dmdFZHRUK1V3WDN2YXdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5EQTFNREJhRncwegpNakE0TVRVeE5EQTFNREJhTUJneEZqQVVCZ05WQkFNVERXdDFZbVZ5Ym1WMFpYTXRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURsdy8wRFJtcG1xRjVnVElmN1o5bElRN0RFdUp6WDJLN1MKcWNvYk5oallFanBqbnBDaXFYOSt5T1R2cGgyUlRKN2tvaGkvUGxrYm5oM2pkeVQ3NWxSMGowSkV1elRMaUdJcApoR2pqc3htek5RRWVwb210R0JwZXNGeUE3NmxTNVp6WVJtT0lFQVgwb0liWjBZazhuU3pQaXBsWDMwcTFETEhGCkVIcSsyZG9vVXRIb09EaEdmWFRJTUJsclZCV3dCV3cxbmdnN0dKb01TN2tHblpYaUw2NFBiRDg5NmtjYXo0a28KTXhhZGc1ZmZQNStBV3JIVHhKV1d2YjNCMjEyOWx3R3FiOHhMTCt1cnVISHVjNEh4em9OVUt1WUlXc2lvQWp4YgphdDh6M1QwV2RnSit2VithWWlRNFlLeEVFdFB4cEMvUHk0czU0UHF4RzVZa0hiMDczMEUxQWdNQkFBR2pRakJBCk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTd2p4STIKRUxVNCtNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQ3k3dHFFMk5WT3E0Z0I1Ngp2clVZMFU0SWp1c0dUN0UzQ2xqSUtQODk2Mm9xdlpvU0NWb2U5YS9UQTR6ZXYrSXJwaTZ1QXFxc3RmT3JFcDJ4CmVwMWdYZHQrbG5nV0xlbXdWdEVOZ0xvSnBTM09Vc3N1ai9XcmJwSVU4M04xWVJTRzdzU21KdXhpa3pnVUhiUk8KZ01SLzIxSDFESzJFdmdQY2pHWXlGbUQzSXQzSjVNcnNiUHZTRG4rUzdWWWF0eWhIMUo4dmwxVDFpbzRWWjRTNgpJRFlaV05JOU10TUpqcGxXL01pRnlwTUhFU1E3UEhHeHpGVExoWFplS0pKSlRPYXFha1AxM3J1WFByVHVDQkl4CkFCSWQraU9qdGhSU3ZxbTFocGtHcmY4Rm9PdG1PYXZmazdDdnNJTWdUV2pqd2JJZWZIRU8zUmVBMzZWZWV3bXoKOFJHVUtBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: k0s\ncontexts:\n- context:\n cluster: k0s\n user: raspi-cluster-master\n name: k0s\ncurrent-context: k0s\nkind: Config\npreferences: {}\nusers:\n- name: raspi-cluster-master\n user:\n client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYVENDQWtXZ0F3SUJBZ0lVV0ZZNkZ4cCtUYnhxQUxTVjM0REVMb0dEc3Q0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5ERTRNREJhRncweQpNekE0TVRneE5ERTRNREJhTURneEZ6QVZCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVIwd0d3WURWUVFECkV4UnlZWE53YVMxamJIVnpkR1Z5TFcxaGMzUmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTGJNalI5eHA1dDJzank1S0dEQnQ2dWl3QU4vaEhwZkFUNXJrZTFRblc2eFlZeDYzR2JBTXYrRQpjWmEyUEdPempQeVVTZThVdWp4ZnR0L1JWSTJRVkVIRGlJZ1ZDNk1tUUFmTm1WVlpKOHBFaTM2dGJZYUVxN3dxClhxYmJBQ0F0ZGtwNTJ0Y0RLVU9sRS9SV0tUSjN4bXUvRmh0OTIrRDdtM1RrZTE0TkJ5a1hvakk1a2xVWU9ySEMKVTN3V210eXlIUFpDMFBPdWpXSE5yeS9wOXFjZzRreWNDN0NzUVZqMWoxY2JwdXRpWllvRHNHV3piS0RTbExRZApyYnUwRnRVZVpUQzVPN2NuTk5tMU1EZldubXhlekw4L2N5dkJCYnRmMjhmcERFeEhMT2dTY2ZZUlZwUllPMzdvCk5yUjljMGNaZE9oZW5YVnlQcU1WVVlSNkQxMlRrY0VDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCUitqQTlGNm1jc25ob2NtMnd0dFNYY2tCaUpoakFmQmdOVkhTTUVHREFXZ0JTd2p4STJFTFU0CitNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBY2RRV3N4OUpHOUIxckxVc2Y1QzgKd1BzTkhkZURYeG1idm4zbXN3aFdVMEZHU1pjWjlkMTYzeXhEWnA4QlNzNWFjNnZqcU1lWlFyRThDUXdXYTlxVAowZVJXcTlFODYzcS9VcFVNN3lPM1BnMHd4RWtQSTVuSjRkM0o3MHA3Zk4zenpzMUJzU0h6Q2hzOWR4dE5XaVp5CnNINzdhbG9NanA0cXBEVWRyVWcyT0d4RWhRdzJIaXE3ZEprQm80a3hoWmhBc3lWTDdZRng0SDY3WkIzSjY4V3QKdTdiWnRmUVJZV3ZPUE9oS0pFdmlLVXptNDJBUlZXTDdhZHVESTBBNmpxbXhkTGNxKzlNWVlaNm1CT0NWakx1WgoybDlJSVI2NkdjOUdpdC9kSFdwbTVZbmozeW8xcUU0UVg4ZmVUQTczUlU5cmFIdkNpTGdVbFRaVUNGa3JNL0NtCndBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHN5TkgzR25tM2F5UExrb1lNRzNxNkxBQTMrRWVsOEJQbXVSN1ZDZGJyRmhqSHJjClpzQXkvNFJ4bHJZOFk3T00vSlJKN3hTNlBGKzIzOUZValpCVVFjT0lpQlVMb3laQUI4MlpWVmtueWtTTGZxMXQKaG9TcnZDcGVwdHNBSUMxMlNubmExd01wUTZVVDlGWXBNbmZHYTc4V0czM2I0UHViZE9SN1hnMEhLUmVpTWptUwpWUmc2c2NKVGZCYWEzTEljOWtMUTg2Nk5ZYzJ2TCtuMnB5RGlUSndMc0t4QldQV1BWeHVtNjJKbGlnT3daYk5zCm9OS1V0QjJ0dTdRVzFSNWxNTGs3dHljMDJiVXdOOWFlYkY3TXZ6OXpLOEVGdTEvYngra01URWNzNkJKeDloRlcKbEZnN2Z1ZzJ0SDF6UnhsMDZGNmRkWEkrb3hWUmhIb1BYWk9Sd1FJREFRQUJBb0lCQUFpYytzbFFnYVZCb29SWgo5UjBhQTUyQ3ZhbHNpTUY3V0lPb2JlZlF0SnBTb1ZZTk0vVmplUU94S2VrQURUaGxiVzg1VFlLR1o0QVF3bjBwClQrS2J1bHllNmYvL2ZkemlJSUk5bmN2M3QzaEFZcEpGZWJPczdLcWhGSFNvUFFsSEd4dkhRaGgvZmFKQ1ZQNWUKVVBLZjBpbWhoMWtrUlFnRTB2NWZCYkVZekEyVGl4bThJSGtQUkdmZWN4WmF1VHpBS2VLR0hjTFpDem8xRHhlSgp3bHpEUW9YWDdHQnY5MGxqR1pndENXcFEyRUxaZ1NwdW0rZ0crekg1WFNXZXgwMzJ4d0NhbkdDdGcyRmxHd2V2Ck9PaG8zSjNrRWVJR1MzSzFJY24rcU9hMjRGZmgvcmRsWXFSdStWeEZ4ZkZqWGxaUjdjZkF4Mnc1Z3NmWm9CRXIKUE1oMTdVRUNnWUVBejZiTDc4RWsvZU1jczF6aWdaVVpZcE5qa2FuWHlsS3NUUWM1dU1pRmNORFdObFkxdlQzVQprOHE5cHVLbnBZRVlTTGVVTS9tSWk5TVp6bmZjSmJSL0hJSG9YVjFMQVJ2blQ0djN3T0JsaDc5ajdKUjBpOW1OClYrR0Q1SlNPUmZCVmYxVlJHRXN6d3ZhOVJsS2lMZ0JVM2tKeWN2Q09jYm5aeFltSXRrbDhDbXNDZ1lFQTRWeG4KZTY2QURIYmR3T0plbEFSKytkVHh5eVYyRjY1SEZDNldPQVh2RVRucGRudnRRUUprWWhNYzM1Y2gvMldmZDBWYQpZb3lGZE9kRThKZSsvcWxuS1pBc3BHRC9yZHp2VmFteHQ4WXdrQXU5Q1diZWw2VENPYkZOQ2hjK1NUbmRqN0duCmlSUHprM1JYMnBEVi9OaW5FVFA0TEJnTHJQYkxlSVAwSzZ4bjk0TUNnWUVBeXZGMmNVendUVjRRNTgrSTVDS0gKVzhzMnpkOFRzbjVZUFRRcG1zb0hlTG55RWNyeDNKRTRXSFVXSTZ0ek01TFczQUxuU21DL3JnQlVRWER0Yk1CYQpWczh6L1VPM2tVN25JOXhrK0ZHWGlUTnBnb2VZM0RGMExZYVBNL0JvbUR3S0kxZUwyVlZ1TWthWnQ4ZjlEejV0CnM0ZDNlWlJYY3hpem1KY1JVUzdDbHg4Q2dZQk45Vmc2K2RlRCtFNm4zZWNYenlKWnJHZGtmZllISlJ1amlLWWcKaFRUNVFZNVlsWEF5Yi9CbjJQTEJDaGdSc0lia2pKSkN5eGVUcERrOS9WQnQ2ZzRzMjVvRjF5UTdjZFU5VGZHVApnRFRtYjVrYU9vSy85SmZYdTFUS0s5WTV
JSkpibGZvOXVqQWxqemFnL2o5NE16NC8vamxZajR6aWJaRmZoRTRnCkdZanhud0tCZ0U1cFIwMlVCa1hYL3IvdjRqck52enNDSjR5V3U2aWtpem00UmJKUXJVdEVNd1Y3a2JjNEs0VFIKM2s1blo1M1J4OGhjYTlMbXREcDJIRWo2MlBpL2pMR0JTN0NhOCtQcStxNjZwWWFZTDAwWnc4UGI3OVMrUmpzQQpONkNuQWg1dDFYeDhVMTIvWm9JcjBpOWZDaERuNlBqVEM0MVh5M1EwWWd6TW5jYXMyNVBiCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n
Using the above kubeconfig, you can now access and use the cluster:
ubuntu@ubuntu:~$ KUBECONFIG=/path/to/kubeconfig kubectl get nodes,deployments,pods -owide -A\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nnode/ubuntu Ready <none> 5m1s v1.27.5+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2\n\nNAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR\nkube-system deployment.apps/coredns 1/1 1 1 33m coredns registry.k8s.io/coredns/coredns:v1.7.0 k8s-app=kube-dns\nkube-system deployment.apps/metrics-server 1/1 1 1 33m metrics-server registry.k8s.io/metrics-server/metrics-server:v0.6.4 k8s-app=metrics-server\n\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system pod/coredns-88b745646-pkk5w 1/1 Running 0 33m 10.244.0.5 ubuntu <none> <none>\nkube-system pod/konnectivity-agent-h4nfj 1/1 Running 0 5m1s 10.244.0.6 ubuntu <none> <none>\nkube-system pod/kube-proxy-qcgzs 1/1 Running 0 5m1s 10.152.56.54 ubuntu <none> <none>\nkube-system pod/kube-router-6lrht 1/1 Running 0 5m1s 10.152.56.54 ubuntu <none> <none>\nkube-system pod/metrics-server-7d7c4887f4-wwbkk 1/1 Running 0 33m 10.244.0.4 ubuntu <none> <none>\n
"},{"location":"reinstall-k0sctl/","title":"Reinstall a node","text":"k0sctl
currently does not support changing all the configuration of containerd (state
, root
) on the fly.
For example, in order to move containerd's root
directory to a new partition/drive, you have to provide --data-dir /new/drive
in your k0sctl installFlags
for each (worker) node. --data-dir
is an option of k0s
and is then added to the service unit.
The following is an example of that:
# spec.hosts[*].installFlags\n- role: worker\ninstallFlags:\n- --profile flatcar\n- --enable-cloud-provider\n- --data-dir /new/drive\n- --kubelet-extra-args=\"--cloud-provider=external\"\n
However, the installFlags
are only used when the node is installed.
Drain the node:
kubectl drain node.hostname\n
Access your node (e.g. via ssh) to stop and reset k0s:
sudo k0s stop\nsudo k0s reset\n
Reboot the node (for good measure):
sudo systemctl reboot\n
Once the node is available again, run k0sctl apply
to integrate it into your cluster and uncordon the node to allow pods to be scheduled:
k0sctl apply -c config.yaml\nkubectl uncordon node.hostname\n
"},{"location":"releases/","title":"Releases","text":"This page describes how we release and support the k0s project. Mirantis Inc. can also provide commercial support for k0s.
"},{"location":"releases/#upstream-kubernetes-release-support-cycle","title":"Upstream Kubernetes release & support cycle","text":"This release and support cycle is followed for ALL new minor releases. A minor release can be e.g. 1.25, 1.26 and so on. What this means in practice is that every 4 months there is a new minor release published.
After a minor release is published, the upstream community maintains it for 14 months. Maintenance in this case means that upstream Kubernetes provides bug fixes, CVE mitigations and the like for 14 months per minor release.
"},{"location":"releases/#k0s-release-and-support-model","title":"k0s release and support model","text":"Starting from the k0s 1.21, k0s started following the Kubernetes project's release and support model.
The k0s project closely follows the upstream Kubernetes release cycle. The only difference to the upstream Kubernetes release and maintenance schedule is that our initial release date is always a few weeks behind the upstream Kubernetes release date, as we build k0s from the officially released version of Kubernetes and need time to test the final version before shipping.
Since upstream Kubernetes provides support and patch releases for a minor version for roughly 14 months, k0s follows this same model. Each minor release is maintained for roughly 14 months after its initial release.
The k0s project will typically ship the fixes needed in k0s's own codebase together with the next upstream Kubernetes patch release. For example, if a bug is identified in the 1.26 series, the k0s project will create and ship a fix for it with the next upstream Kubernetes 1.26.x release. In rare cases where a critical bug is identified we may also ship \u201cout of band\u201d patches. Such out-of-band releases are identified by the version string suffix. For example, a normal release following Kubernetes upstream would be 1.26.3+k0s.0, whereas a critical out-of-band patch would be identified as 1.26.3+k0s.1.
"},{"location":"releases/#new-features-and-enhancements","title":"New features and enhancements","text":"The biggest new k0s features will typically only be delivered on top of the latest Kubernetes version, but smaller enhancements can be included in older release tracks as well.
"},{"location":"releases/#version-string","title":"Version string","text":"The k0s version string consists of the Kubernetes version and the k0s version. For example:
The Kubernetes version (1.27.5) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of that particular Kubernetes version.
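For instance, to see the full version string of the binary installed on a node you can run the following quick check (the value shown will of course match your own installed release):
k0s version\n# v1.27.5+k0s.0 -> Kubernetes 1.27.5, with the k0s.0 suffix denoting the k0s build on top of it\n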
"},{"location":"remove_controller/","title":"Remove or replace a controller","text":"You can manually remove or replace a controller from a multi-node k0s cluster (>=3 controllers) without downtime. However, you have to maintain quorum on Etcd while doing so.
"},{"location":"remove_controller/#remove-a-controller","title":"Remove a controller","text":"If your controller is also a worker (k0s controller --enable-worker
), you first have to delete the controller from Kubernetes itself. To do so, run the following commands from the controller:
# Remove the containers from the node and cordon it\nk0s kubectl drain --ignore-daemonsets --delete-emptydir-data <controller>\n# Delete the node from the cluster\nk0s kubectl delete node <controller>\n
Then you need to remove it from the Etcd cluster. For example, if you want to remove controller01
from a cluster with 3 controllers:
# First, list the Etcd members\nk0s etcd member-list\n{\"members\":{\"controller01\":\"<PEER_ADDRESS1>\", \"controller02\": \"<PEER_ADDRESS2>\", \"controller03\": \"<PEER_ADDRESS3>\"}}\n# Then, remove the controller01 using its peer address\nk0s etcd leave --peer-address \"<PEER_ADDRESS1>\"\n
The controller is now removed from the cluster. To reset k0s on the machine, run the following commands:
k0s stop\nk0s reset\nreboot\n
"},{"location":"remove_controller/#replace-a-controller","title":"Replace a controller","text":"To replace a controller, you first remove the old controller (like described above) then follow the manual installation procedure to add the new one.
"},{"location":"reset/","title":"Uninstall/Reset","text":"k0s can be uninstalled locally with k0s reset
command and remotely with k0sctl reset
command. They remove all k0s-related files from the host.
reset
operates under the assumption that k0s is installed as a service on the host.
To prevent accidental triggering, k0s reset
will not run if the k0s service is running, so you must first stop the service:
Stop the service:
sudo k0s stop\n
Invoke the reset
command:
$ sudo k0s reset\nINFO[2021-06-29 13:08:39] * containers steps\nINFO[2021-06-29 13:08:44] successfully removed k0s containers!\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * remove k0s users step:\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * uninstall service step\nINFO[2021-06-29 13:08:44] Uninstalling the k0s service\nINFO[2021-06-29 13:08:45] * remove directories step\nINFO[2021-06-29 13:08:45] * CNI leftovers cleanup step\nINFO k0s cleanup operations done. To ensure a full reset, a node reboot is recommended.\n
k0sctl can be used to connect to each node and remove all k0s-related files and processes from the hosts.
k0sctl reset
command:$ k0sctl reset --config k0sctl.yaml\nk0sctl v0.9.0 Copyright 2021, k0sctl authors.\n\n? Going to reset all of the hosts, which will destroy all configuration and data, Are you sure? Yes\nINFO ==> Running phase: Connect to hosts \nINFO [ssh] 13.53.43.63:22: connected \nINFO [ssh] 13.53.218.149:22: connected INFO ==> Running phase: Detect host operating systems \nINFO [ssh] 13.53.43.63:22: is running Ubuntu 20.04.2 LTS \nINFO [ssh] 13.53.218.149:22: is running Ubuntu 20.04.2 LTS INFO ==> Running phase: Prepare hosts INFO ==> Running phase: Gather k0s facts \nINFO [ssh] 13.53.43.63:22: found existing configuration \nINFO [ssh] 13.53.43.63:22: is running k0s controller version 1.27.5+k0s.0\nINFO [ssh] 13.53.218.149:22: is running k0s worker version 1.27.5+k0s.0\nINFO [ssh] 13.53.43.63:22: checking if worker has joined INFO ==> Running phase: Reset hosts \nINFO [ssh] 13.53.43.63:22: stopping k0s \nINFO [ssh] 13.53.218.149:22: stopping k0s \nINFO [ssh] 13.53.218.149:22: running k0s reset \nINFO [ssh] 13.53.43.63:22: running k0s reset INFO ==> Running phase: Disconnect from hosts INFO ==> Finished in 8s
k0s uses containerd as the default Container Runtime Interface (CRI) and runc as the default low-level runtime. In most cases they don't require any configuration changes. However, if custom configuration is needed, this page provides some examples.
"},{"location":"runtime/#containerd-configuration","title":"containerd configuration","text":"By default k0s manages the full containerd configuration. User has the option of fully overriding, and thus also managing, the configuration themselves.
"},{"location":"runtime/#user-managed-containerd-configuration","title":"User managed containerd configuration","text":"In the default k0s generated configuration there's a \"magic\" comment telling k0s it is k0s managed:
# k0s_managed=true\n
If you wish to take over the configuration management, remove this line.
To make changes to the containerd configuration, you must first generate a default containerd configuration, with the default values, into /etc/k0s/containerd.toml
:
containerd config default > /etc/k0s/containerd.toml\n
k0s
runs containerd with the following default values:
/var/lib/k0s/bin/containerd \\\n--root=/var/lib/k0s/containerd \\\n--state=/run/k0s/containerd \\\n--address=/run/k0s/containerd.sock \\\n--config=/etc/k0s/containerd.toml\n
Next, add the following default values to the configuration file:
version = 2\nroot = \"/var/lib/k0s/containerd\"\nstate = \"/run/k0s/containerd\"\n...\n\n[grpc]\naddress = \"/run/k0s/containerd.sock\"\n
"},{"location":"runtime/#k0s-managed-dynamic-runtime-configuration","title":"k0s managed dynamic runtime configuration","text":"From 1.27.1 onwards k0s enables dynamic configuration on containerd CRI runtimes. This works by k0s creating a special directory in /etc/k0s/containerd.d/
where users can drop in partial containerd configuration snippets.
k0s will automatically pick up these files and add them to the containerd configuration imports
list. If k0s sees that the drop-ins are CRI related configurations, it will collect all of them into a single file and add that as a single import. This is to overcome a hard limitation in containerd 1.x versions. Read more at containerd#8056
The following chapters provide some examples of how to configure different runtimes for containerd using k0s managed drop-in configurations.
"},{"location":"runtime/#using-gvisor","title":"Using gVisor","text":"gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system.
Install the needed gVisor binaries into the host.
(\nset -e\n ARCH=$(uname -m)\nURL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}\nwget ${URL}/runsc ${URL}/runsc.sha512 \\\n${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512\n sha512sum -c runsc.sha512 \\\n-c containerd-shim-runsc-v1.sha512\n rm -f *.sha512\n chmod a+rx runsc containerd-shim-runsc-v1\n sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin\n)\n
Refer to the gVisor install docs for more information.
Prepare the config for k0s
managed containerd, to utilize gVisor as an additional runtime:
cat <<EOF | sudo tee /etc/k0s/containerd.d/gvisor.toml\nversion = 2\n\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc]\n runtime_type = \"io.containerd.runsc.v1\"\nEOF\n
Start and join the worker into the cluster, as normal:
k0s worker $token\n
Register containerd to the Kubernetes side to make gVisor runtime usable for workloads (by default, containerd uses normal runc as the runtime):
cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n name: gvisor\nhandler: runsc\nEOF\n
At this point, you can use gVisor runtime for your workloads:
apiVersion: v1\nkind: Pod\nmetadata:\nname: nginx-gvisor\nspec:\nruntimeClassName: gvisor\ncontainers:\n- name: nginx\nimage: nginx\n
(Optional) Verify that the created nginx pod is running under gVisor runtime:
# kubectl exec nginx-gvisor -- dmesg | grep -i gvisor\n[ 0.000000] Starting gVisor...\n
nvidia-container-runtime
","text":"First, install the NVIDIA runtime components:
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list\nsudo apt-get update && sudo apt-get install -y nvidia-container-runtime\n
Next, drop in the containerd runtime configuration snippet into /etc/k0s/containerd.d/nvidia.toml
[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia]\nprivileged_without_host_devices = false\nruntime_engine = \"\"\nruntime_root = \"\"\nruntime_type = \"io.containerd.runc.v1\"\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia.options]\nBinaryName = \"/usr/bin/nvidia-container-runtime\"\n
Create the needed RuntimeClass
:
cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n name: nvidia\nhandler: nvidia\nEOF\n
Note: Detailed instructions on how to run nvidia-container-runtime
on your node are available here.
Warning: You can use your own CRI runtime with k0s (for example, docker
). However, k0s will not start or manage the runtime, and configuration is solely your responsibility.
Use the option --cri-socket
to run a k0s worker with a custom CRI runtime. The option takes input in the form of <type>:<socket_path>
(for type
, use docker
for a pure Docker setup and remote
for anything else).
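For instance, a hypothetical CRI-O setup could be wired in with the remote type (the socket path below is an assumption; use whatever path your runtime actually exposes):
k0s worker --cri-socket remote:unix:///var/run/crio/crio.sock --token-file k0s.token\n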
To run k0s with a pre-existing Dockershim setup, run the worker with k0s worker --cri-socket docker:unix:///var/run/cri-dockerd.sock <token>
. A detailed explanation of dockershim and a guide for installing cri-dockerd can be found in our k0s dockershim guide.
SELinux enforces mandatory access control policies that confine user programs and system services, as well as access to files and network resources. Limiting privilege to the minimum required to work reduces or eliminates the ability of these programs and daemons to cause harm if faulty or compromised.
Enabling SELinux in container runtime provides an additional security control to help further enforce isolation among deployed containers and the host.
This guide describes how to enable SELinux in the Kubernetes environment provided by k0s on CentOS and Red Hat Enterprise Linux (RHEL).
"},{"location":"selinux/#requirements","title":"Requirements","text":"SELinux is enabled on CentOS and RHEL by default. Below command output indicates SELinux is enabled.
$ getenforce\nEnforcing\n
"},{"location":"selinux/#install-container-selinux","title":"Install container-selinux","text":"It is required to have container-selinux installed. In most Fedora based distributions including Fedora 37, Red Hat Enterprise Linux 7, 8 and 8, CentOS 7 and 8 and Rocky Linux 9 this can be achieved by installing the package container-selinux.
In RHEL 7 and CentOS 7 this is achieved by running:
yum install -y container-selinux\n
In the rest of the mentioned distributions, run:
dnf install -y container-selinux\n
"},{"location":"selinux/#set-selinux-labels-for-k0s-installation-files","title":"Set SELinux labels for k0s installation files","text":"Run below commands on the host OS of the worker nodes.
DATA_DIR=\"/var/lib/k0s\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/containerd.*\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/runc\"\nsudo restorecon -R -v ${DATA_DIR}/bin\nsudo semanage fcontext -a -t container_var_lib_t \"${DATA_DIR}/containerd(/.*)?\"\nsudo semanage fcontext -a -t container_ro_file_t \"${DATA_DIR}/containerd/io.containerd.snapshotter.*/snapshots(/.*)?\"\nsudo restorecon -R -v ${DATA_DIR}/containerd\n
"},{"location":"selinux/#enable-selinux-in-containerd-of-k0s","title":"Enable SELinux in containerd of k0s","text":"Add below lines to /etc/k0s/containerd.toml
of the worker nodes. You need to restart k0s service on the node to make the change take effect.
[plugins.\"io.containerd.grpc.v1.cri\"]\nenable_selinux = true\n
"},{"location":"selinux/#verify-selinux-works-in-kubernetes-environment","title":"Verify SELinux works in Kubernetes environment","text":"By following the example Assign SELinux labels to a Container, deploy a testing pod using below YAML file:
apiVersion: v1\nkind: Pod\nmetadata:\nname: test-selinux\nspec:\ncontainers:\n- image: busybox\nname: test-selinux\ncommand: [\"sleep\", \"infinity\"]\nsecurityContext:\nseLinuxOptions:\nlevel: \"s0:c123,c456\"\n
After the pod starts, ssh to the worker node on which the pod is running and check the pod process. It should display the label s0:c123,c456
that you specified in the YAML file:
$ ps -efZ | grep -F 'sleep infinity'\nsystem_u:system_r:container_t:s0:c123,c456 root 3346 3288 0 16:39 ? 00:00:00 sleep infinity\n
"},{"location":"shell-completion/","title":"Enabling Shell Completion","text":"Generate the k0s completion script using the k0s completion <shell_name>
command, for Bash, Zsh, fish, or PowerShell.
Sourcing the completion script in your shell enables k0s autocompletion.
"},{"location":"shell-completion/#bash","title":"Bash","text":"echo 'source <(k0s completion bash)' >>~/.bashrc\n
To load completions for each session, execute once:
k0s completion bash > /etc/bash_completion.d/k0s\n
"},{"location":"shell-completion/#zsh","title":"Zsh","text":"If shell completion is not already enabled in Zsh environment you will need to enable it:
echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n
To load completions for each session, execute once:
k0s completion zsh > \"${fpath[1]}/_k0s\"\n
Note: You must start a new shell for the setup to take effect.
"},{"location":"shell-completion/#fish","title":"Fish","text":"k0s completion fish | source\n
To load completions for each session, execute once:
k0s completion fish > ~/.config/fish/completions/k0s.fish\n
"},{"location":"storage/","title":"Storage","text":""},{"location":"storage/#bundled-openebs-storage","title":"Bundled OpenEBS storage","text":"K0s comes out with bundled OpenEBS installation which can be enabled by using configuration file
Use the following configuration as an example:
spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
The cluster will have two storage classes available for you to use:
k0s kubectl get storageclass\n
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE\nopenebs-device openebs.io/local Delete WaitForFirstConsumer false 24s\nopenebs-hostpath openebs.io/local Delete WaitForFirstConsumer false 24s\n
The openebs-hostpath
is the storage class that maps to the /var/openebs/local directory.
The openebs-device
is not configured by default and can be configured with the manifest deployer according to the OpenEBS documentation
Use the following manifests as an example of a pod with a mounted volume:
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: nginx-pvc\nnamespace: default\nspec:\naccessModes:\n- ReadWriteOnce\nstorageClassName: openebs-hostpath\nresources:\nrequests:\nstorage: 5Gi\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx\nnamespace: default\nlabels:\napp: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nstrategy:\ntype: Recreate\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- image: nginx name: nginx\nvolumeMounts:\n- name: persistent-storage\nmountPath: /var/lib/nginx\nvolumes:\n- name: persistent-storage\npersistentVolumeClaim:\nclaimName: nginx-pvc\n
k0s kubectl apply -f nginx.yaml\n
persistentvolumeclaim/nginx-pvc created\ndeployment.apps/nginx created\nbash-5.1# k0s kc get pods\nNAME READY STATUS RESTARTS AGE\nnginx-d95bcb7db-gzsdt 1/1 Running 0 30s\n
k0s kubectl get pv\n
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE\npvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f 5Gi RWO Delete Bound default/nginx-pvc openebs-hostpath 30s\n
"},{"location":"storage/#csi","title":"CSI","text":"k0s supports a wide range of different storage options by utilizing Container Storage Interface (CSI). All Kubernetes storage solutions are supported and users can easily select the storage that fits best for their needs.
When the storage solution implements Container Storage Interface (CSI), containers can communicate with the storage for creation and configuration of persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the supported storage solutions from the previous generation, in-tree volume plugins. More information about the CSI concept is described on the Kubernetes Blog.
"},{"location":"storage/#installing-3rd-party-storage-solutions","title":"Installing 3rd party storage solutions","text":"Follow your storage driver's installation instructions. Note that the Kubelet installed by k0s uses a slightly different path for its working directory (/varlib/k0s/kubelet
instead of /var/lib/kubelet
). Consult the CSI driver's configuration documentation on how to customize this path.
Different Kubernetes storage solutions are explained in the official Kubernetes storage documentation. All of them can be used with k0s. Here are some popular ones:
If you are looking for fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook here.
"},{"location":"system-monitoring/","title":"System components monitoring","text":"Controller nodes are isolated by default, which thus means that a cluster user cannot schedule workloads onto controller nodes.
k0s provides a mechanism to expose system components for monitoring. System component metrics can give a better look into what is happening inside them. Metrics are particularly useful for building dashboards and alerts. You can read more about metrics for Kubernetes system components here.
Note: the mechanism is an opt-in feature, you can enable it on installation:
```shell\nsudo k0s install controller --enable-metrics-scraper\n```\n
"},{"location":"system-monitoring/#jobs","title":"Jobs","text":"The list of components which is scrapped by k0s:
Note: kube-apiserver metrics are not scrapped since they are accessible via kubernetes
endpoint within the cluster.
k0s uses pushgateway with TTL to make it possible to detect issues with the metrics delivery. Default TTL is 2 minutes.
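To check that the metrics components are up after enabling the scraper, you can look for the related workloads (a quick, non-authoritative check; names and namespaces may vary between k0s versions):
kubectl get pods -A | grep -i pushgateway\n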
"},{"location":"system-requirements/","title":"System requirements","text":"This page describes the system requirements for k0s.
"},{"location":"system-requirements/#minimum-memory-and-cpu-requirements","title":"Minimum memory and CPU requirements","text":"The minimum requirements for k0s detailed below are approximations, and thus your results may vary.
Role | Memory (RAM) | Virtual CPU (vCPU)\nController node | 1 GB | 1 vCPU\nWorker node | 0.5 GB | 1 vCPU\nController + worker | 1 GB | 1 vCPU"},{"location":"system-requirements/#controller-node-recommendations","title":"Controller node recommendations","text":"# of Worker nodes | # of Pods | Recommended RAM | Recommended vCPU\nup to 10 | up to 1000 | 1-2 GB | 1-2 vCPU\nup to 50 | up to 5000 | 2-4 GB | 2-4 vCPU\nup to 100 | up to 10000 | 4-8 GB | 2-4 vCPU\nup to 500 | up to 50000 | 8-16 GB | 4-8 vCPU\nup to 1000 | up to 100000 | 16-32 GB | 8-16 vCPU\nup to 5000 | up to 150000 | 32-64 GB | 16-32 vCPU
k0s controller node measured memory consumption can be found below on this page.
"},{"location":"system-requirements/#storage","title":"Storage","text":"It's recommended to use an SSD for optimal storage performance (cluster latency and throughput are sensitive to storage).
The specific storage consumption for k0s is as follows:
Role Storage (k0s part) Controller node ~0.5 GB Worker node ~1.3 GB Controller + worker ~1.7 GBNote: The operating system and application requirements must be considered in addition to the k0s part.
"},{"location":"system-requirements/#host-operating-system","title":"Host operating system","text":"For information on the required ports and protocols, refer to networking.
"},{"location":"system-requirements/#external-runtime-dependencies","title":"External runtime dependencies","text":"k0s strives to be as independent from the OS as possible. The current and past external runtime dependencies are documented here.
To run some automated compatiblility checks on your system, use k0s sysinfo
.
The following table shows the measured memory consumption in the cluster of one controller node.
# of Worker nodes # of Pods (besides default) Memory consumption 1 0 510 MB 1 100 600 MB 20 0 660 MB 20 2000 1000 MB 50 0 790 MB 50 5000 1400 MB 100 0 1000 MB 100 10000 2300 MB 200 0 1500 MB 200 20000 3300 MBMeasurement details:
There are few common cases we've seen where k0s fails to run properly.
"},{"location":"troubleshooting/#coredns-in-crashloop","title":"CoreDNS in crashloop","text":"The most common case we've encountered so far has been CoreDNS getting into crashloop on the node(s).
With kubectl you see something like this:
$ kubectl get pod --all-namespaces\nNAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system calico-kube-controllers-5f6546844f-25px6 1/1 Running 0 167m\nkube-system calico-node-fwjx5 1/1 Running 0 164m\nkube-system calico-node-t4tx5 1/1 Running 0 164m\nkube-system calico-node-whwsg 1/1 Running 0 164m\nkube-system coredns-5c98d7d4d8-tfs4q 1/1 Error 17 167m\nkube-system konnectivity-agent-9jkfd 1/1 Running 0 164m\nkube-system konnectivity-agent-bvhdb 1/1 Running 0 164m\nkube-system konnectivity-agent-r6mzj 1/1 Running 0 164m\nkube-system kube-proxy-kr2r9 1/1 Running 0 164m\nkube-system kube-proxy-tbljr 1/1 Running 0 164m\nkube-system kube-proxy-xbw7p 1/1 Running 0 164m\nkube-system metrics-server-7d4bcb75dd-pqkrs 1/1 Running 0 167m\n
When you check the logs, it'll show something like this:
kubectl -n kube-system logs coredns-5c98d7d4d8-tfs4q\n
plugin/loop: Loop (127.0.0.1:55953 -> :1053) detected for zone \".\", see https://coredns.io/plugins/loop#troubleshooting. Query: \"HINFO 4547991504243258144.3688648895315093531.\"\n
This is most often caused by systemd-resolved stub (or something similar) running locally and CoreDNS detects a possible loop with DNS queries.
The easiest but most crude way to workaround is to disable the systemd-resolved stub and revert the hosts /etc/resolv.conf
to original
Read more at CoreDNS troubleshooting docs.
"},{"location":"troubleshooting/#k0s-controller-fails-on-arm-boxes","title":"k0s controller
fails on ARM boxes","text":"In the logs you probably see etcd not starting up properly.
Etcd is not fully supported on ARM architecture, thus you need to run k0s controller
and thus also etcd process with env ETCD_UNSUPPORTED_ARCH=arm
.
As etcd is not fully supported on ARM, it also means that the k0s control plane with etcd itself is not fully supported on ARM either.
"},{"location":"troubleshooting/#k0s-will-not-start-on-zfs-based-systems","title":"k0s
will not start on ZFS-based systems","text":"On ZFS-based systems k0s will fail to start because containerd runs by default in overlayfs mode to manage image layers. This is not compatible with ZFS and requires a custom config of containerd. The following steps should get k0s working on ZFS-based systems:
$ ctr -a /run/k0s/containerd.sock plugins ls
that the containerd ZFS snapshotter plugin is in ok
state (should be the case if ZFS kernel modules and ZFS userspace utils are correctly configured):TYPE ID PLATFORMS STATUS \n...\nio.containerd.snapshotter.v1 zfs linux/amd64 ok\n...\n
$ containerd config default > /etc/k0s/containerd.toml
/etc/k0s/containerd.toml
:...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"overlayfs\"\n...\n
to
...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"zfs\"\n...\n
$ zfs create -o mountpoint=/var/lib/k0s/containerd/io.containerd.snapshotter.v1.zfs rpool/containerd
$ k0s install controller --single -c /etc/k0s/k0s.yaml
Once we enable cloud provider support on kubelet on worker nodes, kubelet will automatically add a taint node.cloudprovider.kubernetes.io/uninitialized
for the node. This tain will prevent normal workloads to be scheduled on the node until the cloud provider controller actually runs second initialization on the node and removes the taint. This means that these nodes are not available for scheduling until the cloud provider controller is actually successfully running on the cluster.
For troubleshooting your specific cloud provider see its documentation.
"},{"location":"troubleshooting/#k0s-not-working-with-read-only-usr","title":"k0s not working with read only/usr
","text":"By default k0s does not run on nodes where /usr
is read only.
This can be fixed by changing the default path for volumePluginDir
in your k0s config. You will need to change to values, one for the kubelet itself, and one for Calico.
Here is a snippet of an example config with the default values changed:
spec:\ncontrollerManager:\nextraArgs:\nflex-volume-plugin-dir: \"/etc/kubernetes/kubelet-plugins/volume/exec\"\nnetwork:\ncalico:\nflexVolumeDriverPath: /etc/k0s/kubelet-plugins/volume/exec/nodeagent~uds\nworkerProfiles:\n- name: coreos\nvalues:\nvolumePluginDir: /etc/k0s/kubelet-plugins/volume/exec/\n
With this config you can start your controller as usual. Any workers will need to be started with
k0s worker --profile coreos [TOKEN]\n
"},{"location":"troubleshooting/#profiling","title":"Profiling","text":"We drop any debug related information and symbols from the compiled binary by utilzing -w -s
linker flags.
To keep those symbols use DEBUG
env variable:
DEBUG=true make k0s\n
Any value not equal to the \"false\" would work.
To add custom linker flags use LDFLAGS
variable.
LD_FLAGS=\"--custom-flag=value\" make k0s\n
"},{"location":"troubleshooting/#im-using-custom-cri-and-missing-some-labels-in-prometheus-metrics","title":"I'm using custom CRI and missing some labels in Prometheus metrics","text":"Due to removal of the embedded dockershim from Kubelet, the Kubelet's embedded cAdvisor metrics got slightly broken. If your container runtime is a custom containerd you can add --kubelet-extra-flags=\"--containerd=<path/to/containerd.sock>\"
into k0s worker startup. That configures the Kubelet embedded cAdvisor to talk directly with containerd to gather the metrics and thus gets the expected labels in place.
Unfortunately this does not work on when using Docker via cri-dockerd shim. Currently, there is no easy solution to this problem.
In the future Kubelet will be refactored to get the container metrics from CRI interface rather than from the runtime directly. This work is specified and followed up in KEP-2371 but until that work completes the only option is to run a standalone cAdvisor. The known issues section in the official Kubernetes documentation about migrating away from dockershim explains the current shortcomings and shows how to run cAdvisor as a standalone DaemonSet.
"},{"location":"troubleshooting/#customized-configurations","title":"Customized configurations","text":"/var/lib/k0s
, for example:/var/lib/k0s/kubelet
/var/lib/k0s/etcd
The k0s upgrade is a simple process due to its single binary distribution. The k0s single binary file includes all the necessary parts for the upgrade and essentially the upgrade process is to replace that file and restart the service.
This tutorial explains two different approaches for k0s upgrade:
If your k0s cluster has been deployed with k0sctl, then k0sctl provides the easiest upgrade method. In that case jump to the next chapter. However, if you have deployed k0s without k0sctl, then follow the upgrade method explained in this chapter.
Before starting the upgrade, consider moving your applications to another node if you want to avoid downtime. This can be done by draining a worker node. Remember to uncordon the worker node afterwards to tell Kubernetes that it can resume scheduling new pods onto the node.
The upgrade process is started by stopping the currently running k0s service.
sudo k0s stop\n
Now you can replace the old k0s binary file. The easiest way is to use the download script. It will download the latest k0s binary and replace the old binary with it. You can also do this manually without the download script.
curl -sSLf https://get.k0s.sh | sudo sh\n
Then you can start the service (with the upgraded k0s) and your upgrade is done.
sudo k0s start\n
"},{"location":"upgrade/#upgrade-a-k0s-cluster-using-k0sctl","title":"Upgrade a k0s cluster using k0sctl","text":"The upgrading of k0s clusters using k0sctl occurs not through a particular command (there is no upgrade
sub-command in k0sctl) but by way of the configuration file. The configuration file describes the desired state of the cluster, and when you pass the description to the k0sctl apply
command a discovery of the current state is performed and the system does whatever is necessary to bring the cluster to the desired state (for example, perform an upgrade).
The following operations occur during a k0sctl upgrade:
Upgrade of each controller, one at a time. There is no downtime if multiple controllers are configured.
Upgrade of workers, in batches of 10%.
Draining of workers, which allows the workload to move to other nodes prior to the actual upgrade of the worker node components. (To skip the drain process, use the --no-drain
option.)
The upgrade process continues once the upgraded nodes return to Ready state.
You can configure the desired cluster version in the k0sctl configuration by setting the value of spec.k0s.version
:
spec:\nk0s:\nversion: 1.27.5+k0s.0\n
If you do not specify a version, k0sctl checks online for the latest version and defaults to it.
k0sctl apply\n
...\n...\nINFO[0001] ==> Running phase: Upgrade controllers\nINFO[0001] [ssh] 10.0.0.23:22: starting upgrade\nINFO[0001] [ssh] 10.0.0.23:22: Running with legacy service name, migrating...\nINFO[0011] [ssh] 10.0.0.23:22: waiting for the k0s service to start\nINFO[0016] ==> Running phase: Upgrade workers\nINFO[0016] Upgrading 1 workers in parallel\nINFO[0016] [ssh] 10.0.0.17:22: upgrade starting\nINFO[0027] [ssh] 10.0.0.17:22: waiting for node to become ready again\nINFO[0027] [ssh] 10.0.0.17:22: upgrade successful\nINFO[0027] ==> Running phase: Disconnect from hosts\nINFO[0027] ==> Finished in 27s\nINFO[0027] k0s cluster version 1.27.5+k0s.0 is now installed\nINFO[0027] Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO[0027] k0sctl kubeconfig\n
"},{"location":"user-management/","title":"User Management","text":""},{"location":"user-management/#adding-a-cluster-user","title":"Adding a Cluster User","text":"Run the kubeconfig create command on the controller to add a user to the cluster. The command outputs a kubeconfig for the user, to use for authentication.
k0s kubeconfig create [username]\n
"},{"location":"user-management/#enabling-access-to-cluster-resources","title":"Enabling Access to Cluster Resources","text":"Create the user with the system:masters
group to grant the user access to the cluster:
k0s kubeconfig create --groups \"system:masters\" testUser > k0s.config\n
Create a roleBinding
to grant the user access to the resources:
k0s kubectl create clusterrolebinding --kubeconfig k0s.config testUser-admin-binding --clusterrole=admin --user=testUser\n
"},{"location":"worker-node-config/","title":"Configuration options for worker nodes","text":"Although the k0s worker
command does not take in any special yaml configuration, there are still methods for configuring the workers to run various components.
The k0s worker
command accepts the --labels
flag, with which you can make the newly joined worker node the register itself, in the Kubernetes API, with the given set of labels.
For example, running the worker with k0s worker --token-file k0s.token --labels=\"k0sproject.io/foo=bar,k0sproject.io/other=xyz\"
results in:
kubectl get node --show-labels\n
NAME STATUS ROLES AGE VERSION LABELS\nworker0 NotReady <none> 10s v1.27.5+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux\n
Controller worker nodes are assigned node.k0sproject.io/role=control-plane
and node-role.kubernetes.io/control-plane=true
labels:
kubectl get node --show-labels\n
NAME STATUS ROLES AGE VERSION LABELS\ncontroller0 NotReady control-plane 10s v1.27.5+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true\n
Note: Setting the labels is only effective on the first registration of the node. Changing the labels thereafter has no effect.
"},{"location":"worker-node-config/#taints","title":"Taints","text":"The k0s worker
command accepts the --taints
flag, with which you can make the newly joined worker node the register itself with the given set of taints.
Note: Controller nodes running with --enable-worker
are assigned node-role.kubernetes.io/master:NoExecute
taint automatically. You can disable default taints using --no-taints
parameter.
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n
NAME TAINTS\ncontroller0 [map[effect:NoSchedule key:node-role.kubernetes.io/master]]\nworker0 <none>\n
"},{"location":"worker-node-config/#kubelet-configuration","title":"Kubelet configuration","text":"The k0s worker
command accepts a generic flag to pass in any set of arguments for kubelet process.
For example, running k0s worker --token-file=k0s.token --kubelet-extra-args=\"--node-ip=1.2.3.4 --address=0.0.0.0\"
passes in the given flags to Kubelet as-is. As such, you must confirm that any flags you are passing in are properly formatted and valued as k0s will not validate those flags.
Kubelet configuration fields can also be set via a worker profiles. Worker profiles are defined in the main k0s.yaml and are used to generate ConfigMaps containing a custom kubelet.config.k8s.io/v1beta1/KubeletConfiguration
object. To see examples of k0s.yaml containing worker profiles: go here. For a list of possible Kubelet configuration fields: go here.
k0s detects iptables backend automatically based on the existing records. On a brand-new setup, iptables-nft
will be used. There is a --iptables-mode
flag to specify the mode explicitly. Valid values: nft
, legacy
and auto
(default).
k0s worker --iptables-mode=nft\n
"},{"location":"cli/","title":"Index","text":""},{"location":"cli/#k0s","title":"k0s","text":"k0s - Zero Friction Kubernetes
"},{"location":"cli/#synopsis","title":"Synopsis","text":"k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula
"},{"location":"cli/#options","title":"Options","text":" -h, --help help for k0s\n
"},{"location":"cli/#see-also","title":"SEE ALSO","text":"k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s/#synopsis","title":"Synopsis","text":"k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula
"},{"location":"cli/k0s/#options","title":"Options","text":" -h, --help help for k0s\n
"},{"location":"cli/k0s/#see-also","title":"SEE ALSO","text":"Manage airgap setup
"},{"location":"cli/k0s_airgap/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for airgap\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap/#see-also","title":"SEE ALSO","text":"List image names and version needed for air-gap install
k0s airgap list-images [flags]\n
"},{"location":"cli/k0s_airgap_list-images/#examples","title":"Examples","text":"k0s airgap list-images\n
"},{"location":"cli/k0s_airgap_list-images/#options","title":"Options","text":" --all include all images, even if they are not used in the current configuration\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for list-images\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap_list-images/#see-also","title":"SEE ALSO","text":"Run the controller API
k0s api [flags]\n
"},{"location":"cli/k0s_api/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for api\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_api/#see-also","title":"SEE ALSO","text":"Back-Up k0s configuration. Must be run as root (or with sudo)
k0s backup [flags]\n
"},{"location":"cli/k0s_backup/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for backup\n --save-path string destination directory path for backup assets, use '-' for stdout\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_backup/#see-also","title":"SEE ALSO","text":"Generate completion script
"},{"location":"cli/k0s_completion/#synopsis","title":"Synopsis","text":"To load completions:
Bash:
$ source <(k0s completion bash)
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once","title":"To load completions for each session, execute once:","text":"$ k0s completion bash > /etc/bash_completion.d/k0s
Zsh:
"},{"location":"cli/k0s_completion/#if-shell-completion-is-not-already-enabled-in-your-environment-you-will-need","title":"If shell completion is not already enabled in your environment you will need","text":""},{"location":"cli/k0s_completion/#to-enable-it-you-can-execute-the-following-once","title":"to enable it. You can execute the following once:","text":"$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_1","title":"To load completions for each session, execute once:","text":"$ k0s completion zsh > \"${fpath[1]}/_k0s\"
"},{"location":"cli/k0s_completion/#you-will-need-to-start-a-new-shell-for-this-setup-to-take-effect","title":"You will need to start a new shell for this setup to take effect.","text":"Fish:
$ k0s completion fish | source
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_2","title":"To load completions for each session, execute once:","text":"$ k0s completion fish > ~/.config/fish/completions/k0s.fish
k0s completion <bash|zsh|fish|powershell>\n
"},{"location":"cli/k0s_completion/#options","title":"Options","text":" -h, --help help for completion\n
"},{"location":"cli/k0s_completion/#see-also","title":"SEE ALSO","text":"Configuration related sub-commands
"},{"location":"cli/k0s_config/#options","title":"Options","text":" -h, --help help for config\n
"},{"location":"cli/k0s_config/#see-also","title":"SEE ALSO","text":"Output the default k0s configuration yaml to stdout
k0s config create [flags]\n
"},{"location":"cli/k0s_config_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for create\n --include-images include the default images in the output\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_create/#see-also","title":"SEE ALSO","text":"Launch the editor configured in your shell to edit k0s configuration
k0s config edit [flags]\n
"},{"location":"cli/k0s_config_edit/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n -h, --help help for edit\n
"},{"location":"cli/k0s_config_edit/#see-also","title":"SEE ALSO","text":"Display dynamic configuration reconciliation status
k0s config status [flags]\n
"},{"location":"cli/k0s_config_status/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n -h, --help help for status\n -o, --output string Output format. Must be one of yaml|json\n
"},{"location":"cli/k0s_config_status/#see-also","title":"SEE ALSO","text":"Validate k0s configuration
"},{"location":"cli/k0s_config_validate/#synopsis","title":"Synopsis","text":"Example: k0s config validate --config path_to_config.yaml
k0s config validate [flags]\n
"},{"location":"cli/k0s_config_validate/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for validate\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_validate/#see-also","title":"SEE ALSO","text":"Run controller
k0s controller [join-token] [flags]\n
"},{"location":"cli/k0s_controller/#examples","title":"Examples","text":" Command to associate master nodes:\n CLI argument:\n $ k0s controller [join-token]\n\n or CLI flag:\n $ k0s controller --token-file [path_to_file]\n Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_controller/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --disable-components strings disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n --enable-dynamic-config enable cluster-wide dynamic config based on custom resource\n --enable-k0s-cloud-provider enables the k0s-cloud-provider (default false)\n --enable-metrics-scraper enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n --enable-worker enable worker (default false)\n -h, --help help for controller\n --ignore-pre-flight-checks continue even if pre-flight checks fail\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --k0s-cloud-provider-port int the port that k0s-cloud-provider binds on (default 10258)\n --k0s-cloud-provider-update-frequency duration the frequency of k0s-cloud-provider node updates (default 2m0s)\n --kube-controller-manager-extra-args string extra args for kube-controller-manager\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1])\n --no-taints disable default taints for controller node\n --profile string worker profile to use on the node (default \"default\")\n --single enable single node (implies --enable-worker, default false)\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing join-token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_controller/#see-also","title":"SEE ALSO","text":"containerd CLI
"},{"location":"cli/k0s_ctr/#synopsis","title":"Synopsis","text":"ctr is an unsupported debug and administrative client for interacting with the containerd daemon. Because it is unsupported, the commands, options, and operations are not guaranteed to be backward compatible or stable from release to release of the containerd project.
k0s ctr [flags]\n
"},{"location":"cli/k0s_ctr/#options","title":"Options","text":" -h, --help help for ctr\n
"},{"location":"cli/k0s_ctr/#see-also","title":"SEE ALSO","text":"Generate k0s command documentation
k0s docs <markdown|man> [flags]\n
"},{"location":"cli/k0s_docs/#options","title":"Options","text":" -h, --help help for docs\n
"},{"location":"cli/k0s_docs/#see-also","title":"SEE ALSO","text":"Manage etcd cluster
"},{"location":"cli/k0s_etcd/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for etcd\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd/#see-also","title":"SEE ALSO","text":"Sign off a given etc node from etcd cluster
k0s etcd leave [flags]\n
"},{"location":"cli/k0s_etcd_leave/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for leave\n --peer-address string etcd peer address\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_leave/#see-also","title":"SEE ALSO","text":"Returns etcd cluster members list
k0s etcd member-list [flags]\n
"},{"location":"cli/k0s_etcd_member-list/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for member-list\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_member-list/#see-also","title":"SEE ALSO","text":"Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -e, --env stringArray set environment variable\n --force force init script creation\n -h, --help help for install\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install/#see-also","title":"SEE ALSO","text":"Install k0s controller on a brand-new system. Must be run as root (or with sudo)
k0s install controller [flags]\n
"},{"location":"cli/k0s_install_controller/#examples","title":"Examples","text":"All default values of controller command will be passed to the service stub unless overridden.\n\nWith the controller subcommand you can setup a single node cluster by running:\n\n k0s install controller --single\n
"},{"location":"cli/k0s_install_controller/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --disable-components strings disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n --enable-dynamic-config enable cluster-wide dynamic config based on custom resource\n --enable-k0s-cloud-provider enables the k0s-cloud-provider (default false)\n --enable-metrics-scraper enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n --enable-worker enable worker (default false)\n -h, --help help for controller\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --k0s-cloud-provider-port int the port that k0s-cloud-provider binds on (default 10258)\n --k0s-cloud-provider-update-frequency duration the frequency of k0s-cloud-provider node updates (default 2m0s)\n --kube-controller-manager-extra-args string extra args for kube-controller-manager\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1])\n --no-taints disable default taints for controller node\n --profile string worker profile to use on the node (default \"default\")\n --single enable single node (implies --enable-worker, default false)\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing join-token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_controller/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --env stringArray set environment variable\n --force force init script creation\n
"},{"location":"cli/k0s_install_controller/#see-also","title":"SEE ALSO","text":"Install k0s worker on a brand-new system. Must be run as root (or with sudo)
k0s install worker [flags]\n
"},{"location":"cli/k0s_install_worker/#examples","title":"Examples","text":"Worker subcommand allows you to pass in all available worker parameters.\nAll default values of worker command will be passed to the service stub unless overridden.\n\nWindows flags like \"--api-server\", \"--cidr-range\" and \"--cluster-dns\" will be ignored since install command doesn't yet support Windows services\n
"},{"location":"cli/k0s_install_worker/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n -h, --help help for worker\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info])\n --profile string worker profile to use on the node (default \"default\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_worker/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --env stringArray set environment variable\n --force force init script creation\n
"},{"location":"cli/k0s_install_worker/#see-also","title":"SEE ALSO","text":"Create a kubeconfig file for a specified user
k0s kubeconfig [command] [flags]\n
"},{"location":"cli/k0s_kubeconfig/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for kubeconfig\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig/#see-also","title":"SEE ALSO","text":"Display Admin's Kubeconfig file
"},{"location":"cli/k0s_kubeconfig_admin/#synopsis","title":"Synopsis","text":"Print kubeconfig for the Admin user to stdout
k0s kubeconfig admin [flags]\n
"},{"location":"cli/k0s_kubeconfig_admin/#examples","title":"Examples","text":" $ k0s kubeconfig admin > ~/.kube/config\n $ export KUBECONFIG=~/.kube/config\n $ kubectl get nodes\n
"},{"location":"cli/k0s_kubeconfig_admin/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for admin\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_admin/#see-also","title":"SEE ALSO","text":"Create a kubeconfig for a user
"},{"location":"cli/k0s_kubeconfig_create/#synopsis","title":"Synopsis","text":"Create a kubeconfig with a signed certificate and public key for a given user (and optionally user groups) Note: A certificate once signed cannot be revoked for a particular user
k0s kubeconfig create username [flags]\n
"},{"location":"cli/k0s_kubeconfig_create/#examples","title":"Examples","text":" Command to create a kubeconfig for a user:\n CLI argument:\n $ k0s kubeconfig create username\n\n optionally add groups:\n $ k0s kubeconfig create username --groups [groups]\n
"},{"location":"cli/k0s_kubeconfig_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --groups string Specify groups\n -h, --help help for create\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_create/#see-also","title":"SEE ALSO","text":"kubectl controls the Kubernetes cluster manager
"},{"location":"cli/k0s_kubectl/#synopsis","title":"Synopsis","text":"kubectl controls the Kubernetes cluster manager.
Find more information at: https://kubernetes.io/docs/reference/kubectl/
k0s kubectl [flags]\n
"},{"location":"cli/k0s_kubectl/#options","title":"Options","text":" --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.\n --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.\n --as-uid string UID to impersonate for the operation.\n --cache-dir string Default cache directory (default \"/home/runner/.kube/cache\")\n --certificate-authority string Path to a cert file for the certificate authority\n --client-certificate string Path to a client certificate file for TLS\n --client-key string Path to a client key file for TLS\n --cluster string The name of the kubeconfig cluster to use\n --context string The name of the kubeconfig context to use\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for kubectl\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Path to the kubeconfig file to use for CLI requests.\n --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)\n --match-server-version Require server version to match client version\n -n, --namespace string If present, the namespace scope for this CLI request\n --password string Password for basic authentication to the API server\n --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default \"none\")\n --profile-output string Name of the file to write the profile to (default \"profile.pprof\")\n --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\")\n -s, --server string The address and port of the Kubernetes API server\n --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used\n --token string Bearer token for authentication to the API server\n --user string The name of the kubeconfig user to use\n --username string Username for basic authentication to the API server\n -v, --v Level number for the log level verbosity\n --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format)\n --warnings-as-errors Treat warnings received from the server as errors and exit with a non-zero exit code\n
"},{"location":"cli/k0s_kubectl/#see-also","title":"SEE ALSO","text":"Uninstall k0s. Must be run as root (or with sudo)
k0s reset [flags]\n
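A typical teardown sequence might look like the following sketch; a node reboot afterwards is generally recommended to clear any leftover network and mount state:
sudo k0s stop\nsudo k0s reset\nsudo reboot\n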
"},{"location":"cli/k0s_reset/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for reset\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_reset/#see-also","title":"SEE ALSO","text":"restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
k0s restore filename [flags]\n
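For example, restoring from a local archive or from stdin could look like this (the archive name is illustrative):
sudo k0s restore k0s_backup.tar.gz\ncat k0s_backup.tar.gz | sudo k0s restore -\n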
"},{"location":"cli/k0s_restore/#options","title":"Options","text":" --config-out string Specify desired name and full path for the restored k0s.yaml file (default: /home/runner/work/k0s/k0s/k0s_<archive timestamp>.yaml\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for restore\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_restore/#see-also","title":"SEE ALSO","text":"Start the k0s service configured on this host. Must be run as root (or with sudo)
k0s start [flags]\n
"},{"location":"cli/k0s_start/#options","title":"Options","text":" -h, --help help for start\n
"},{"location":"cli/k0s_start/#see-also","title":"SEE ALSO","text":"Get k0s instance status information
k0s status [flags]\n
"},{"location":"cli/k0s_status/#examples","title":"Examples","text":"The command will return information about system init, PID, k0s role, kubeconfig and similar.\n
"},{"location":"cli/k0s_status/#options","title":"Options","text":" -h, --help help for status\n -o, --out string sets type of output to json or yaml\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status/#see-also","title":"SEE ALSO","text":"Get k0s instance component status information
k0s status components [flags]\n
"},{"location":"cli/k0s_status_components/#examples","title":"Examples","text":"The command will return information about k0s components.\n
"},{"location":"cli/k0s_status_components/#options","title":"Options","text":" -h, --help help for components\n --max-count int how many latest probes to show (default 1)\n
"},{"location":"cli/k0s_status_components/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -o, --out string sets type of output to json or yaml\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status_components/#see-also","title":"SEE ALSO","text":"Stop the k0s service configured on this host. Must be run as root (or with sudo)
k0s stop [flags]\n
"},{"location":"cli/k0s_stop/#options","title":"Options","text":" -h, --help help for stop\n
"},{"location":"cli/k0s_stop/#see-also","title":"SEE ALSO","text":"Display system information
"},{"location":"cli/k0s_sysinfo/#synopsis","title":"Synopsis","text":"Runs k0s's pre-flight checks and issues the results to stdout.
k0s sysinfo [flags]\n
"},{"location":"cli/k0s_sysinfo/#options","title":"Options","text":" --controller Include controller-specific sysinfo (default true)\n --data-dir string Data Directory for k0s (default \"/var/lib/k0s\")\n -h, --help help for sysinfo\n --worker Include worker-specific sysinfo (default true)\n
"},{"location":"cli/k0s_sysinfo/#see-also","title":"SEE ALSO","text":"Manage join tokens
"},{"location":"cli/k0s_token/#options","title":"Options","text":" -h, --help help for token\n
"},{"location":"cli/k0s_token/#see-also","title":"SEE ALSO","text":"Create join token
k0s token create [flags]\n
"},{"location":"cli/k0s_token_create/#examples","title":"Examples","text":"k0s token create --role worker --expiry 100h //sets expiration time to 100 hours\nk0s token create --role worker --expiry 10m //sets expiration time to 10 minutes\n
"},{"location":"cli/k0s_token_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --expiry string Expiration time of the token. Format 1.5h, 2h45m or 300ms. (default \"0s\")\n -h, --help help for create\n --role string Either worker or controller (default \"worker\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n --wait wait forever (default false)\n
"},{"location":"cli/k0s_token_create/#see-also","title":"SEE ALSO","text":"Invalidates existing join token
k0s token invalidate [flags]\n
"},{"location":"cli/k0s_token_invalidate/#examples","title":"Examples","text":"k0s token invalidate xyz123\n
"},{"location":"cli/k0s_token_invalidate/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for invalidate\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_invalidate/#see-also","title":"SEE ALSO","text":"List join tokens
k0s token list [flags]\n
"},{"location":"cli/k0s_token_list/#examples","title":"Examples","text":"k0s token list --role worker // list worker tokens\n
"},{"location":"cli/k0s_token_list/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for list\n --role string Either worker, controller or empty for all roles\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_list/#see-also","title":"SEE ALSO","text":"Generates token and secret and stores them as a files
k0s token pre-shared [flags]\n
"},{"location":"cli/k0s_token_pre-shared/#examples","title":"Examples","text":"k0s token pre-shared --role worker --cert <path>/<to>/ca.crt --url https://<controller-ip>:<port>/\n
"},{"location":"cli/k0s_token_pre-shared/#options","title":"Options","text":" --cert string path to the CA certificate file\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for pre-shared\n --out string path to the output directory. Default: current dir (default \".\")\n --role string token role. valid values: worker, controller. Default: worker (default \"worker\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --url string url of the api server to join\n --valid duration how long token is valid, in Go duration format\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_pre-shared/#see-also","title":"SEE ALSO","text":"Print the k0s version
k0s version [flags]\n
"},{"location":"cli/k0s_version/#options","title":"Options","text":" -a, --all use to print all k0s version info\n -h, --help help for version\n -j, --json use to print all k0s version info in json\n
"},{"location":"cli/k0s_version/#see-also","title":"SEE ALSO","text":"Run worker
k0s worker [join-token] [flags]\n
"},{"location":"cli/k0s_worker/#examples","title":"Examples","text":" Command to add worker node to the master node:\n CLI argument:\n $ k0s worker [token]\n\n or CLI flag:\n $ k0s worker --token-file [path_to_file]\n Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_worker/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n -h, --help help for worker\n --ignore-pre-flight-checks continue even if pre-flight checks fail\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1])\n --profile string worker profile to use on the node (default \"default\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_worker/#see-also","title":"SEE ALSO","text":"k0s follows the CNCF Code of Conduct.
"},{"location":"contributors/github_workflow/","title":"GitHub Workflow","text":"This guide assumes you have already cloned the upstream repo to your system via git clone
, or via go get github.com/k0sproject/k0s
.
export GITHUB_USER={ your github username }\n
cd $WORKDIR/k0s\ngit remote add $GITHUB_USER git@github.com:${GITHUB_USER}/k0s.git\n\n# Prevent push to Upstream\ngit remote set-url --push origin no_push\n\n# Set your fork remote as a default push target\ngit push --set-upstream $GITHUB_USER main\n
Your remotes should look something like this:
git remote -v\n
origin https://github.com/k0sproject/k0s (fetch)\norigin no_push (push)\nmy_fork git@github.com:{ github_username }/k0s.git (fetch)\nmy_fork git@github.com:{ github_username }/k0s.git (push)\n
"},{"location":"contributors/github_workflow/#create-rebase-your-feature-branch","title":"Create & Rebase Your Feature Branch","text":"Create a feature branch and switch to it:
git checkout -b my_feature_branch\n
Rebase your branch:
git fetch origin && \\\ngit rebase origin/main\n
Current branch my_feature_branch is up to date.\n
Please don't use git pull
instead of the above fetch
/ rebase
. git pull
does a merge, which leaves merge commits. These make the commit history messy and violate the principle that commits ought to be individually understandable and useful.
Commit and sign your changes:
git commit --signoff\n
The commit message should have a short, capitalized title without a trailing period as its first line. After the title, add a blank line and then a longer description that explains why the change was made, unless it is obvious.
Use imperative mood in the commit message.
For example:
Summarize changes in around 50 characters or less\n\nMore detailed explanatory text, if necessary. Wrap it to about 72\ncharacters or so. In some contexts, the first line is treated as the\nsubject of the commit and the rest of the text as the body. The\nblank line separating the summary from the body is critical (unless\nyou omit the body entirely); various tools like `log`, `shortlog`\nand `rebase` can get confused if you run the two together.\n\nExplain the problem that this commit is solving. Focus on why you\nare making this change as opposed to how (the code explains that).\nAre there side effects or other unintuitive consequences of this\nchange? Here's the place to explain them.\n\nFurther paragraphs come after blank lines.\n\n - Bullet points are okay, too\n\n - Typically a hyphen or asterisk is used for the bullet, preceded\n by a single space, with blank lines in between.\n\nIf you use an issue tracker, put references to them at the bottom,\nlike this:\n\nFixes: https://github.com/k0sproject/k0s/issues/373\nSee also: #456, #789\n\nSigned-off-by: Name Lastname <user@example.com>\n
You can go back and edit/build/test some more, then commit --amend
in a few cycles.
When ready, push your changes to your fork's repository:
git push --set-upstream my_fork my_feature_branch\n
"},{"location":"contributors/github_workflow/#open-a-pull-request","title":"Open a Pull Request","text":"See GitHub's docs on how to create a pull request from a fork.
"},{"location":"contributors/github_workflow/#get-a-code-review","title":"Get a code review","text":"Once your pull request has been opened it will be assigned to one or more reviewers, and will go through a series of smoke tests.
Commit changes made in response to review comments should be added to the same branch on your fork.
Very small PRs are easy to review. Very large PRs are very difficult to review.
"},{"location":"contributors/github_workflow/#squashing-commits","title":"Squashing Commits","text":"Commits on your branch should represent meaningful milestones or units of work. Small commits that contain typo fixes, rebases, review feedbacks, etc should be squashed.
To do that, it's best to perform an interactive rebase:
"},{"location":"contributors/github_workflow/#example","title":"Example","text":"Rebase your feature branch against upstream main branch:
git rebase -i origin/main\n
If your PR has 3 commits, output would be similar to this:
pick f7f3f6d Changed some code\npick 310154e fixed some typos\npick a5f4a0d made some review changes\n\n# Rebase 710f0f8..a5f4a0d onto 710f0f8\n#\n# Commands:\n# p, pick <commit> = use commit\n# r, reword <commit> = use commit, but edit the commit message\n# e, edit <commit> = use commit, but stop for amending\n# s, squash <commit> = use commit, but meld into previous commit\n# f, fixup <commit> = like \"squash\", but discard this commit's log message\n# x, exec <command> = run command (the rest of the line) using shell\n# b, break = stop here (continue rebase later with 'git rebase --continue')\n# d, drop <commit> = remove commit\n# l, label <label> = label current HEAD with a name\n# t, reset <label> = reset HEAD to a label\n# m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n# . create a merge commit using the original merge commit's\n# . message (or the oneline, if no original merge commit was\n# . specified). Use -c <commit> to reword the commit message.\n#\n# These lines can be re-ordered; they are executed from top to bottom.\n#\n# However, if you remove everything, the rebase will be aborted.\n#\n# Note that empty commits are commented out\n
Use a command line text editor to change the word pick
to f
of fixup
for the commits you want to squash, then save your changes and continue the rebase:
Per the output above, you can see that:
fixup <commit> = like \"squash\", but discard this commit's log message\n
Which means that when rebased, the commit message \"fixed some typos\" will be removed, and squashed with the parent commit.
"},{"location":"contributors/github_workflow/#push-your-final-changes","title":"Push Your Final Changes","text":"Once done, you can push the final commits to your branch:
git push --force\n
You can run multiple iteration of rebase
/push -f
, if needed.
Thank you for taking the time to make a contribution to k0s. The following document is a set of guidelines and instructions for contributing to k0s.
When contributing to this repository, please consider first discussing the change you wish to make by opening an issue.
"},{"location":"contributors/overview/#code-of-conduct","title":"Code of Conduct","text":"Our code of conduct can be found in the link below. Please follow it in all your interactions with the project.
We use GitHub flow, so all code changes are tracked via Pull Requests. A detailed guide on the recommended workflow can be found below:
All submitted PRs go through a set of tests and reviews. You can run most of these tests before a PR is submitted. In fact, we recommend it, because it will save on many possible review iterations and automated tests. The testing guidelines can be found here:
By contributing, you agree that your contributions will be licensed as followed:
Some of you might have noticed we have official community blog hosted on Medium. If you are not yet following us, we'd like to invite you to do so now! Make sure to follow us on Twitter as well \ud83d\ude0a
We have also decided to participate in the Lens Forums. As part of our ongoing collaboration with the Lens IDE team, who are not only close friends of the k0s crew but also widely embraced by the Kubernetes user community, it was only natural for us to join forces on their platform. By becoming a part of the Lens Forums, you can easily connect with us through the dedicated k0s categories. Stay in the loop with the latest news, engage in technical discussions, and contribute your expertise and feedback!
"},{"location":"contributors/testing/","title":"Testing Your Code","text":"k0s uses github actions to run automated tests on any PR, before merging. However, a PR will not be reviewed before all tests are green, so to save time and prevent your PR from going stale, it is best to test it before submitting the PR.
"},{"location":"contributors/testing/#run-local-verifications","title":"Run Local Verifications","text":"Please run the following style and formatting commands and fix/check-in any changes:
Linting
We use golangci-lint
for style verification. In the repository's root directory, simply run:
make lint\n
There's no need to install golangci-lint
manually. The build system will take care of that.
Go fmt
go fmt ./...\n
Checking the documentation
Verify any changes to the documentation by following the instructions here.
Pre-submit Flight Checks
In the repository root directory, make sure that:
make build && git diff --exit-code
runs successfully. Verifies that the build is working and that the generated source code matches the one that's checked into source control.make check-unit
runs successfully. Verifies that all the unit tests pass.make check-basic
runs successfully. Verifies basic cluster functionality using one controller and two workers.make check-hacontrolplane
runs successfully. Verifies that joining of controllers works.Please note that this last test is prone to \"flakiness\", so it might fail on occasion. If it fails constantly, take a deeper look at your code to find the source of the problem.
If you find that all tests passed, you may open a pull request upstream.
You may open a pull request in draft mode. All automated tests will still run against the PR, but the PR will not be assigned for review. Once a PR is ready for review, transition it from Draft mode, and code owners will be notified.
"},{"location":"contributors/testing/#conformance-testing","title":"Conformance Testing","text":"Once a PR has been reviewed and all other tests have passed, a code owner will run a full end-to-end conformance test against the PR. This is usually the last step before merging.
"},{"location":"contributors/testing/#pre-requisites-for-pr-merge","title":"Pre-Requisites for PR Merge","text":"In order for a PR to be merged, the following conditions should exist:
--signoff
option.In order to clean up the local workspace, run make clean
. It will clean up all of the intermediate files and directories created during the k0s build. Note that you can't just use git clean -X
or even rm -rf
, since the Go modules cache sets all of its subdirectories to read-only. If you get in trouble while trying to delete your local workspace, try chmod -R u+w /path/to/workspace && rm -rf /path/to/workspace
.
You can configure k0s with the Ambassador API Gateway and a MetalLB service loadbalancer. To do this you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml
file during cluster configuration.
Note: Currently Ambassador API Gateway does not support Kubernetes v1.22 or above. See here for details.
"},{"location":"examples/ambassador-ingress/#use-docker-for-non-native-k0s-platforms","title":"Use Docker for non-native k0s platforms","text":"With Docker you can run k0s on platforms that the distribution does not natively support (refer to Run k0s in Docker). Skip this section if you are on a platform that k0s natively supports.
As you need to create a custom configuration file to install Ambassador Gateway, you will first need to map that file into the k0s container and to expose the ports Ambassador needs for outside access.
Run k0s under Docker:
docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
Export the default k0s configuration file:
docker exec k0s k0s config create > k0s.yaml\n
Export the cluster config, so you can access it using kubectl:
docker exec k0s cat /var/lib/k0s/pki/admin.conf > k0s-cluster.conf\nexport KUBECONFIG=\"$KUBECONFIG:$PWD/k0s-cluster.conf\"\n
k0s.yaml
for Ambassador Gateway","text":"Open the k0s.yml
file and append the following extensions at the end:
extensions:\nhelm:\nrepositories:\n- name: datawire\nurl: https://www.getambassador.io\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: ambassador\nchartname: datawire/ambassador\nversion: \"6.5.13\"\nnamespace: ambassador\nvalues: |2\nservice:\nexternalIPs:\n- 172.17.0.2\n- name: metallb\nchartname: bitnami/metallb\nversion: \"1.0.1\"\nnamespace: default\nvalues: |2\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 172.17.0.2\n
Note: It may be necessary to replace the 172.17.0.2 IP with your local IP address.
This action adds both Ambassador and MetalLB (required for LoadBalancers) with the corresponding repositories and (minimal) configurations. Be aware that the provided example illustrates the use of your local network and that you will want to provide a range of IPs for MetalLB that are addressable on your LAN to access these services from anywhere on your network.
Stop/remove your k0s container:
docker stop k0s\ndocker rm k0s\n
Retart your k0s container, this time with additional ports and the above config file mapped into it:
docker run --name k0s --hostname k0s --privileged -v /var/lib/k0s -v \"$PWD\"/k0s.yaml:/k0s.yaml -p 6443:6443 -p 80:80 -p 443:443 -p 8080:8080 docker.io/k0sproject/k0s:latest\n
After some time, you will be able to list the Ambassador Services:
kubectl get services -n ambassador\n
Output:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nambassador-1611224811 LoadBalancer 10.99.84.151 172.17.0.2 80:30327/TCP,443:30355/TCP 2m11s\nambassador-1611224811-admin ClusterIP 10.96.79.130 <none> 8877/TCP 2m11s\nambassador-1611224811-redis ClusterIP 10.110.33.229 <none> 6379/TCP 2m11s\n
Install the Ambassador edgectl tool and run the login command:
edgectl login --namespace=ambassador localhost\n
Your browser will open and deeliver you to the Ambassador Console.
Create a YAML file for the service (for example purposes, create a Swagger Petstore service using a petstore.YAML file):
---\napiVersion: v1\nkind: Service\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nports:\n- name: http\nport: 80\ntargetPort: 8080\nselector:\napp: petstore\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: petstore\nstrategy:\ntype: RollingUpdate\ntemplate:\nmetadata:\nlabels:\napp: petstore\nspec:\ncontainers:\n- name: petstore-backend\nimage: docker.io/swaggerapi/petstore3:unstable\nports:\n- name: http\ncontainerPort: 8080\n---\napiVersion: getambassador.io/v2\nkind: Mapping\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nprefix: /petstore/\nservice: petstore\n
Apply the YAML file:
kubectl apply -f petstore.yaml\n
Output:
service/petstore created\ndeployment.apps/petstore created\nmapping.getambassador.io/petstore created\n
Validate that the service is running.
In the terminal using curl:
curl -k 'https://localhost/petstore/api/v3/pet/findByStatus?status=available'\n
Output:
[{\"id\":1,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":2,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":4,\"category\":{\"id\":1,\"name\":\"Dogs\"},\"name\":\"Dog 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":7,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":8,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":9,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 3\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"},{\"id\":10,\"category\":{\"id\":3,\"name\":\"Rabbits\"},\"name\":\"Rabbit 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"}]\n
Or by way of your browser:
Open https://localhost/petstore/ in your browser and change the URL in the field at the top of the page to https://localhost/petstore/api/v3/openapi.json (as it is mapped to the /petstore prefix) and click Explore.
Navigate to the Mappings area in the Ambassador Console to view the corresponding PetStore mapping as configured.
Ansible is a popular infrastructure-as-code tool that can use to automate tasks for the purpose of achieving the desired state in a system. With Ansible (and the k0s-Ansible playbook) you can quickly install a multi-node Kubernetes Cluster.
Note: Before using Ansible to create a cluster, you should have a general understanding of Ansible (refer to the official Ansible User Guide.
"},{"location":"examples/ansible-playbook/#prerequisites","title":"Prerequisites","text":"You will require the following tools to install k0s on local virtual machines:
Tool Detailmultipass
A lightweight VM manager that uses KVM on Linux, Hyper-V on Windows, and hypervisor.framework on macOS. Installation information ansible
An infrastructure as code tool. Installation Guide kubectl
Command line tool for running commands against Kubernetes clusters. Kubernetes Install Tools"},{"location":"examples/ansible-playbook/#create-the-cluster","title":"Create the cluster","text":"Download k0s-ansible
Clone the k0s-ansible repository on your local machine:
git clone https://github.com/movd/k0s-ansible.git\ncd k0s-ansible\n
Create virtual machines
Note: Though multipass is the VM manager in use here, there is no interdependence.
Create a number of virtual machines. For the automation to work, each instance must have passwordless SSH access. To achieve this, provision each instance with a cloud-init manifest that imports your current users' public SSH key and into a user k0s
(refer to the bash script below).
This creates 7 virtual machines:
./tools/multipass_create_instances.sh 7\n
Create cloud-init to import ssh key...\n[1/7] Creating instance k0s-1 with multipass...\nLaunched: k0s-1\n[2/7] Creating instance k0s-2 with multipass...\nLaunched: k0s-2\n[3/7] Creating instance k0s-3 with multipass...\nLaunched: k0s-3\n[4/7] Creating instance k0s-4 with multipass...\nLaunched: k0s-4\n[5/7] Creating instance k0s-5 with multipass...\nLaunched: k0s-5\n[6/7] Creating instance k0s-6 with multipass...\nLaunched: k0s-6\n[7/7] Creating instance k0s-7 with multipass...\nLaunched: k0s-7\nName State IPv4 Image\nk0s-1 Running 192.168.64.32 Ubuntu 20.04 LTS\nk0s-2 Running 192.168.64.33 Ubuntu 20.04 LTS\nk0s-3 Running 192.168.64.56 Ubuntu 20.04 LTS\nk0s-4 Running 192.168.64.57 Ubuntu 20.04 LTS\nk0s-5 Running 192.168.64.58 Ubuntu 20.04 LTS\nk0s-6 Running 192.168.64.60 Ubuntu 20.04 LTS\nk0s-7 Running 192.168.64.61 Ubuntu 20.04 LTS\n
Create Ansible inventory
1. Copy the sample to create the inventory directory:
```shell\n cp -rfp inventory/sample inventory/multipass\n ```\n
2. Create the inventory.
Assign the virtual machines to the different host groups, as required by the playbook logic.\n\n | Host group | Detail |\n |:----------------------|:------------------------------------------|\n | `initial_controller` | Must contain a single node that creates the worker and controller tokens needed by the other nodes|\n | `controller` | Can contain nodes that, together with the host from `initial_controller`, form a highly available isolated control plane |\n | `worker` | Must contain at least one node, to allow for the deployment of Kubernetes objects |\n
3. Fill in inventory/multipass/inventory.yml
. This can be done by direct entry using the metadata provided by multipass list,
, or you can use the following Python script multipass_generate_inventory.py
:
```shell\n ./tools/multipass_generate_inventory.py\n ```\n\n ```shell\n Designate first three instances as control plane\n Created Ansible Inventory at: /Users/dev/k0s-ansible/tools/inventory.yml\n $ cp tools/inventory.yml inventory/multipass/inventory.yml\n ```\n\n Your `inventory/multipass/inventory.yml` should resemble the example below:\n\n ```yaml\n ---\n all:\n children:\n initial_controller:\n hosts:\n k0s-1:\n controller:\n hosts:\n k0s-2:\n k0s-3:\n worker:\n hosts:\n k0s-4:\n k0s-5:\n k0s-6:\n k0s-7:\n hosts:\n k0s-1:\n ansible_host: 192.168.64.32\n k0s-2:\n ansible_host: 192.168.64.33\n k0s-3:\n ansible_host: 192.168.64.56\n k0s-4:\n ansible_host: 192.168.64.57\n k0s-5:\n ansible_host: 192.168.64.58\n k0s-6:\n ansible_host: 192.168.64.60\n k0s-7:\n ansible_host: 192.168.64.61\n vars:\n ansible_user: k0s\n ```\n
Test the virtual machine connections
Run the following command to test the connection to your hosts:
ansible -i inventory/multipass/inventory.yml -m ping\n
k0s-4 | SUCCESS => {\n\"ansible_facts\": {\n\"discovered_interpreter_python\": \"/usr/bin/python3\"\n},\n \"changed\": false,\n \"ping\": \"pong\"\n}\n...\n
If the test result indicates success, you can proceed.
Provision the cluster with Ansible
Applying the playbook, k0s download and be set up on all nodes, tokens will be exchanged, and a kubeconfig will be dumped to your local deployment environment.
ansible-playbook site.yml -i inventory/multipass/inventory.yml\n
TASK [k0s/initial_controller : print kubeconfig command] *******************************************************\nTuesday 22 December 2020 17:43:20 +0100 (0:00:00.257) 0:00:41.287 ******\nok: [k0s-1] => {\n\"msg\": \"To use Cluster: export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\"\n}\n...\nPLAY RECAP *****************************************************************************************************\nk0s-1 : ok=21 changed=11 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-2 : ok=10 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-3 : ok=10 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-4 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-5 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-6 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-7 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\n\nTuesday 22 December 2020 17:43:36 +0100 (0:00:01.204) 0:00:57.478 ******\n===============================================================================\nprereq : Install apt packages -------------------------------------------------------------------------- 22.70s\nk0s/controller : Wait for k8s apiserver ----------------------------------------------------------------- 4.30s\nk0s/initial_controller : Create worker join token ------------------------------------------------------- 3.38s\nk0s/initial_controller : Wait for k8s apiserver --------------------------------------------------------- 3.36s\ndownload : Download k0s binary k0s-v0.9.0-rc1-amd64 ----------------------------------------------------- 3.11s\nGathering Facts ----------------------------------------------------------------------------------------- 2.85s\nGathering Facts ----------------------------------------------------------------------------------------- 1.95s\nprereq : Create k0s Directories ------------------------------------------------------------------------- 1.53s\nk0s/worker : Enable and check k0s service --------------------------------------------------------------- 1.20s\nprereq : Write the k0s config file ---------------------------------------------------------------------- 1.09s\nk0s/initial_controller : Enable and check k0s service --------------------------------------------------- 0.94s\nk0s/controller : Enable and check k0s service ----------------------------------------------------------- 0.73s\nGathering Facts ----------------------------------------------------------------------------------------- 0.71s\nGathering Facts ----------------------------------------------------------------------------------------- 0.66s\nGathering Facts ----------------------------------------------------------------------------------------- 0.64s\nk0s/worker : Write the k0s token file on worker --------------------------------------------------------- 0.64s\nk0s/worker : Copy k0s service file ---------------------------------------------------------------------- 0.53s\nk0s/controller : Write the k0s token file on controller ------------------------------------------------- 0.41s\nk0s/controller : Copy k0s service file ------------------------------------------------------------------ 0.40s\nk0s/initial_controller : Copy k0s service file ---------------------------------------------------------- 0.36s\n
A kubeconfig was copied to your local machine while the playbook was running which you can use to gain access to your new Kubernetes cluster:
export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\nkubectl cluster-info\n
Kubernetes control plane is running at https://192.168.64.32:6443\nCoreDNS is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\n\n$ kubectl get nodes -o wide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-4 Ready <none> 21s v1.20.1-k0s1 192.168.64.57 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-5 Ready <none> 21s v1.20.1-k0s1 192.168.64.58 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-6 NotReady <none> 21s v1.20.1-k0s1 192.168.64.60 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-7 NotReady <none> 21s v1.20.1-k0s1 192.168.64.61 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\n
Note: The first three control plane nodes will not display, as the control plane is fully isolated. To check on the distributed etcd cluster, you can use ssh to securely log a controller node, or you can run the following ad-hoc command:
ansible k0s-1 -a \"k0s etcd member-list -c /etc/k0s/k0s.yaml\" -i inventory/multipass/inventory.yml | tail -1 | jq\n
{\n\"level\": \"info\",\n\"members\": {\n\"k0s-1\": \"https://192.168.64.32:2380\",\n\"k0s-2\": \"https://192.168.64.33:2380\",\n\"k0s-3\": \"https://192.168.64.56:2380\"\n},\n\"msg\": \"done\",\n\"time\": \"2020-12-23T00:21:22+01:00\"\n}\n
Once all worker nodes are at Ready
state you can use the cluster. You can test the cluster state by creating a simple nginx deployment.
kubectl create deployment nginx --image=gcr.io/google-containers/nginx --replicas=5\n
deployment.apps/nginx created\n
kubectl expose deployment nginx --target-port=80 --port=8100\n
service/nginx exposed\n
kubectl run hello-k0s --image=quay.io/prometheus/busybox --rm -it --restart=Never --command -- wget -qO- nginx:8100\n
<!DOCTYPE html>\n<html>\n<head>\n<title>Welcome to nginx on Debian!</title>\n...\npod \"hello-k0s\" deleted\n
Note: k0s users are the developers of k0s-ansible. Please send your feedback, bug reports, and pull requests to github.com/movd/k0s-ansible._
"},{"location":"examples/gitops-flux/","title":"Using GitOps with Flux","text":"This tutorial describes the benefits of using GitOps with k0s and provides an example of deploying an application with Flux v2.
GitOps is a practice where you leverage Git as the single source of truth. It offers a declarative way to do Kubernetes cluster management and application delivery. The desired states, using Kubernetes manifests and helm packages, are pulled from a git repository and automatically deployed to the cluster. This also makes it quick to re-deploy and recover applications whenever needed.
"},{"location":"examples/gitops-flux/#why-gitops-with-k0s","title":"Why GitOps with k0s","text":"k0s doesn't come with a lot of different extensions and add-ons that some users might find useful (and some not). Instead, k0s comes with 100% upstream Kubernetes and is compatible with all Kubernetes extensions. This makes it easy for k0s users to freely select the needed extensions that their applications and infrastructure need, without conflicting to any predefined options. Now, GitOps is a perfect practice to deploy these extensions automatically with applications by defining and configuring them directly in Git. This will also help with cluster security as the cluster doesn't need to be accessed directly when application changes are needed. However, this puts more stress on the Git access control, because changes in Git are propagated automatically to the cluster.
"},{"location":"examples/gitops-flux/#install-k0s","title":"Install k0s","text":"Let's start by installing k0s. Any k0s deployment option will do, but to keep things simple, this Quick Start Guide gets you started with a single node k0s cluster.
Run these three commands to download k0s, install and start it:
curl -sSLf https://get.k0s.sh | sudo sh\nsudo k0s install controller --single\nsudo k0s start\n
"},{"location":"examples/gitops-flux/#set-kubeconfig","title":"Set kubeconfig","text":"Next, you need to set the KUBECONFIG variable, which is needed by Flux CLI later on.
sudo k0s kubeconfig admin > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\n
"},{"location":"examples/gitops-flux/#install-flux","title":"Install Flux","text":"To proceed with Flux, install the Flux CLI, which is used for configuring Flux to your Kubernetes cluster. For macOS and Linux, this can be done either with brew or bash script. Use one of them:
brew install fluxcd/tap/flux\n
or
curl -s https://fluxcd.io/install.sh | sudo bash\n
For more details of the Flux installation, check the Flux documentation.
"},{"location":"examples/gitops-flux/#configure-flux-for-a-github-repository","title":"Configure Flux for a GitHub repository","text":"Export your GitHub personal access token (instructions how to get it) and username:
export GITHUB_TOKEN=<your-token>\nexport GITHUB_USER=<your-username>\n
Come up with a GitHub repo name (e.g. flux-demo), which will be used by Flux to store (and sync) the config files.
export GITHUB_REPO_NAME=<select-repo-name-to-be-created>\n
Bootstrap flux to your cluster. The GitHub repo will be created automatically by Flux:
flux bootstrap github \\\n--owner=$GITHUB_USER \\\n--repository=$GITHUB_REPO_NAME \\\n--branch=main \\\n--path=./clusters/my-cluster \\\n--personal\n
Now you are all set with Flux and can proceed to deploy your first application.
"},{"location":"examples/gitops-flux/#deploy-example-application","title":"Deploy example application","text":"Next, we'll deploy a simple web application and expose it using a NodePort service. In the previous step, we configured Flux to track the path /clusters/my-cluster/ in your repository. Now clone the repo to your local machine:
git clone git@github.com:$GITHUB_USER/$GITHUB_REPO_NAME.git\ncd $GITHUB_REPO_NAME/clusters/my-cluster/\n
Create the following YAML file (simple-web-server-with-nodeport.yaml) into the same directory:
apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\ntype: NodePort\nselector:\napp: web\nports:\n- port: 80\ntargetPort: 80\nnodePort: 30003\n
Then push the new file to the repository:
git add .\ngit commit -m \"Add web server manifest\"\ngit push\n
Check that Flux detects your changes and the web server gets applied (by default this should happen within 1 min):
flux get kustomizations\n
If the deployment went successfully, you should see the newly added objects:
sudo k0s kubectl get all -n web\n
You can try to access the web application using
curl localhost:30003\n
or by using a web browser http://localhost:30003.
Voil\u00e0! You have now installed the example application using the GitOps method with Flux. As a next step you can try to modify the web app YAML file or add another application directly in to the Git repo and see how Flux will automatically pick up the changes without accessing the cluster with kubectl.
"},{"location":"examples/gitops-flux/#uninstall-flux","title":"Uninstall Flux","text":"If you want to uninstall Flux from the cluster, run:
flux uninstall --namespace=flux-system\n
Your applications, which were installed by Flux, will remain in the cluster, but you don't have the Flux processes anymore to sync up the desired state from Git.
"},{"location":"examples/metallb-loadbalancer/","title":"Installing MetalLB Load Balancer","text":"This tutorial covers the installation of MetalLB load balancer on k0s. k0s doesn't come with an in-built load balancer, but it's easy to deploy MetalLB as shown in this document.
"},{"location":"examples/metallb-loadbalancer/#about-load-balancers","title":"About Load Balancers","text":"Load balancers can be used for exposing applications to the external network. Load balancer provides a single IP address to route incoming requests to your app. In order to successfully create Kubernetes services of type LoadBalancer, you need to have the load balancer (implementation) available for Kubernetes.
Load balancer can be implemented by a cloud provider as an external service (with additional cost). This can also be implemented internally in the Kubernetes cluster (pure SW solution) with MetalLB.
"},{"location":"examples/metallb-loadbalancer/#metallb","title":"MetalLB","text":"MetalLB implements the Kubernetes service of type LoadBalancer. When a LoadBalancer service is requested, MetalLB allocates an IP address from the configured range and makes the network aware that the IP \u201clives\u201d in the cluster.
One of the benefits of MetalLB is that you avoid all cloud provider dependencies. That's why MetalLB is typically used for bare-metal deployments.
See the MetalLB requirements in the MetalLB's official documentation. By default, k0s runs with Kube-Router CNI, which is compatible with MetalLB as long as you don't use MetalLB\u2019s BGP mode. If you are not using Kube-Router and you are using kube-proxy in IPVS mode, you need to enable strict ARP mode in kube-proxy (see MetalLB preparations):
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nnetwork:\nkubeProxy:\nmode: ipvs\nipvs:\nstrictARP: true\n
Port 7946 (TCP & UDP) must be allowed between the nodes. In addition, before installing MetalLB, make sure there is no other software running on port 7946 on the nodes, such as docker daemon.
"},{"location":"examples/metallb-loadbalancer/#install-metallb","title":"Install MetalLB","text":"Install MetalLB using the official Helm chart and k0s Helm extension manager:
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\n metadata:\nname: k0s\nspec:\n extensions:\n helm:\n repositories:\n - name: metallb\n url: https://metallb.github.io/metallb\n charts:\n - name: metallb\n chartname: metallb/metallb\n namespace: metallb\n
Other installation methods are available in the MetalLB's official documentation.
Create ConfigMap for MetalLB
Next you need to create ConfigMap, which includes an IP address range for the load balancer. The pool of IPs must be dedicated to MetalLB's use. You can't reuse for example the Kubernetes node IPs or IPs controlled by other services. You can, however, use private IP addresses, for example 192.168.1.180-192.168.1.199, but then you need to take care of the routing from the external network if you need external access. In this example, we don't need it.
Create a YAML file accordingly, and deploy it: kubectl apply -f metallb-l2-pool.yaml
---\napiVersion: metallb.io/v1beta1\nkind: IPAddressPool\nmetadata:\nname: first-pool\nnamespace: metallb-system\nspec:\naddresses:\n- <ip-address-range-start>-<ip-address-range-stop>\n---\napiVersion: metallb.io/v1beta1\nkind: L2Advertisement\nmetadata:\nname: example\nnamespace: metallb-system\n
Deploy an example application (web server) with a load balancer
apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
Check your LoadBalancer
Run the following command to see your LoadBalancer with the external-ip and port.
kubectl get service -n web\n
Access your example application
If you used private IP addresses for MetalLB in the ConfigMap (in step 2), you should run the following command from the local network. Use the IP address from the previous step.
curl <EXTERNAL-IP>\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
For more information about MetalLB installation, take a look at the official MetalLB documentation.
"},{"location":"examples/metallb-loadbalancer/#alternative-examples","title":"Alternative examples","text":"Get load balancer using cloud provider.
"},{"location":"examples/nginx-ingress/","title":"Installing NGINX Ingress Controller","text":"This tutorial covers the installation of NGINX Ingress controller, which is an open source project made by the Kubernetes community. k0s doesn't come with an in-built Ingress controller, but it's easy to deploy NGINX Ingress as shown in this document. Other Ingress solutions can be used as well (see the links at the end of the page).
"},{"location":"examples/nginx-ingress/#nodeport-vs-loadbalancer-vs-ingress-controller","title":"NodePort vs LoadBalancer vs Ingress controller","text":"Kubernetes offers multiple options for exposing services to external networks. The main options are NodePort, LoadBalancer and Ingress controller.
NodePort, as the name says, means that a port on a node is configured to route incoming requests to a certain service. The port range is limited to 30000-32767, so you cannot expose commonly used ports like 80 or 443 with NodePort.
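For illustration, a minimal NodePort Service could look like the following sketch (the name, selector and nodePort value are placeholders, not taken from this tutorial):
apiVersion: v1\nkind: Service\nmetadata:\n  name: example-nodeport   # hypothetical name\nspec:\n  type: NodePort\n  selector:\n    app: example           # hypothetical selector\n  ports:\n  - port: 80\n    targetPort: 80\n    nodePort: 30080        # must fall within 30000-32767\n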
LoadBalancer is a service that is typically implemented by the cloud provider as an external service (at additional cost). Load balancers can also be installed internally in the Kubernetes cluster with MetalLB, which is typically used for bare-metal deployments. A load balancer provides a single IP address to access your services, which can run on multiple nodes.
Ingress controller helps to consolidate routing rules of multiple applications into one entity. Ingress controller is exposed to an external network with the help of NodePort, LoadBalancer or host network. You can also use Ingress controller to terminate TLS for your domain in one place, instead of terminating TLS for each application separately.
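As a sketch of the TLS-termination idea (the host name, Secret and backend Service are placeholders, not part of this tutorial):
apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: example-tls-ingress     # hypothetical name\nspec:\n  tls:\n  - hosts:\n    - app.example.com           # hypothetical host\n    secretName: example-tls     # Secret holding the TLS certificate and key\n  rules:\n  - host: app.example.com\n    http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: example-service  # hypothetical backend service\n            port:\n              number: 80\n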
"},{"location":"examples/nginx-ingress/#nginx-ingress-controller","title":"NGINX Ingress Controller","text":"NGINX Ingress Controller is a very popular Ingress for Kubernetes. In many cloud environments, it can be exposed to an external network by using the load balancer offered by the cloud provider. However, cloud load balancers are not necessary. Load balancer can also be implemented with MetalLB, which can be deployed in the same Kubernetes cluster. Another option to expose the Ingress controller to an external network is to use NodePort. The third option is to use host network. All of these alternatives are described in more detail on below, with separate examples.
"},{"location":"examples/nginx-ingress/#install-nginx-using-nodeport","title":"Install NGINX using NodePort","text":"Installing NGINX using NodePort is the most simple example for Ingress Controller as we can avoid the load balancer dependency. NodePort is used for exposing the NGINX Ingress to the external network.
Install NGINX Ingress Controller (using the official manifests by the ingress-nginx project)
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
Check that the Ingress controller pods have started
kubectl get pods -n ingress-nginx\n
Check that you can see the NodePort service
kubectl get services -n ingress-nginx\n
From version v1.0.0
of the Ingress-NGINX Controller, an IngressClass object is required.
In the default installation, an ingressclass object named nginx
has already been created.
$ kubectl -n ingress-nginx get ingressclasses\nNAME CONTROLLER PARAMETERS AGE\nnginx k8s.io/ingress-nginx <none> 162m\n
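For reference, the IngressClass created by the default installation corresponds roughly to the following manifest (a sketch; the exact labels may differ between releases):
apiVersion: networking.k8s.io/v1\nkind: IngressClass\nmetadata:\n  name: nginx\nspec:\n  controller: k8s.io/ingress-nginx\n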
If this is the only instance of the Ingress-NGINX controller, you should add the ingressclass.kubernetes.io/is-default-class annotation to your ingress class:
kubectl -n ingress-nginx annotate ingressclasses nginx ingressclass.kubernetes.io/is-default-class=\"true\"\n
Try connecting to the Ingress controller using the NodePort from the previous step (in the range of 30000-32767)
curl <worker-external-ip>:<node-port>\n
If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it.
Deploy a small test application (httpd web server) to verify your Ingress controller.
Create the following YAML file and name it \"simple-web-server-with-ingress.yaml\":
apiVersion: v1\nkind: Namespace\nmetadata:\n  name: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: web-server\n  namespace: web\nspec:\n  selector:\n    matchLabels:\n      app: web\n  template:\n    metadata:\n      labels:\n        app: web\n    spec:\n      containers:\n      - name: httpd\n        image: httpd:2.4.53-alpine\n        ports:\n        - containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: web-server-service\n  namespace: web\nspec:\n  selector:\n    app: web\n  ports:\n  - protocol: TCP\n    port: 5000\n    targetPort: 80\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: web-server-ingress\n  namespace: web\nspec:\n  ingressClassName: nginx\n  rules:\n  - host: web.example.com\n    http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: web-server-service\n            port:\n              number: 5000\n
Deploy the app:
kubectl apply -f simple-web-server-with-ingress.yaml\n
Verify that you can access your application using the NodePort from step 3.
curl <worker-external-ip>:<node-port> -H 'Host: web.example.com'\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
In this example you'll install NGINX Ingress controller using LoadBalancer on k0s.
Install LoadBalancer
There are two alternatives for installing a load balancer on k0s. Follow one of the links below to install it.
- MetalLB as a pure software solution running internally in the k0s cluster - Cloud provider's load balancer running outside of the k0s cluster
Verify LoadBalancer
In order to proceed you need to have a load balancer available for the Kubernetes cluster. To verify that it's available, deploy a simple load balancer service.
apiVersion: v1\nkind: Service\nmetadata:\n  name: example-load-balancer\nspec:\n  selector:\n    app: web\n  ports:\n  - protocol: TCP\n    port: 80\n    targetPort: 80\n  type: LoadBalancer\n
kubectl apply -f example-load-balancer.yaml\n
Then run the following command to see your LoadBalancer with an external IP address.
kubectl get service example-load-balancer\n
If the LoadBalancer is not available, you won't get an IP address for EXTERNAL-IP. Instead, it's <pending>
. In this case you should go back to the previous step and check your load balancer availability.
If you are successful, you'll see a real IP address and you can proceed further.
You can delete the example-load-balancer:
kubectl delete -f example-load-balancer.yaml\n
Install NGINX Ingress Controller by following the steps in the previous chapter (step 1 to step 4).
Edit the NGINX Ingress Controller to use LoadBalancer instead of NodePort
kubectl edit service ingress-nginx-controller -n ingress-nginx\n
Find the spec.type field and change it from \"NodePort\" to \"LoadBalancer\".
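After the edit, the relevant part of the Service should look roughly like this (all other fields left untouched):
apiVersion: v1\nkind: Service\nmetadata:\n  name: ingress-nginx-controller\n  namespace: ingress-nginx\nspec:\n  type: LoadBalancer   # changed from NodePort\n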
Check that you can see the ingress-nginx-controller with type LoadBalancer.
kubectl get services -n ingress-nginx\n
Try connecting to the Ingress controller
If you used private IP addresses for MetalLB in step 2, you should run the following command from the local network. Use the IP address from the previous step, column EXTERNAL-IP.
curl <EXTERNAL-IP>\n
If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it using LoadBalancer.
Deploy a small test application (httpd web server) to verify your Ingress.
Create the YAML file \"simple-web-server-with-ingress.yaml\" as described in the previous chapter (step 6) and deploy it.
kubectl apply -f simple-web-server-with-ingress.yaml\n
Verify that you can access your application through the LoadBalancer and Ingress controller.
curl <worker-external-ip> -H 'Host: web.example.com'\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
The host network option exposes Ingress directly using the worker nodes' IP addresses. It also allows you to use ports 80 and 443. This option doesn't use any Service objects (ClusterIP, NodePort, LoadBalancer) and it has the limitation that only one Ingress controller Pod may be scheduled on each cluster node.
Download the official NGINX Ingress Controller manifests:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
Edit deploy.yaml. Find the Deployment ingress-nginx-controller and enable the host network option by adding the hostNetwork line:
spec:\n  template:\n    spec:\n      hostNetwork: true\n
You can also remove the Service ingress-nginx-controller completely, because it won't be needed.
Install Ingress
kubectl apply -f deploy.yaml\n
Try to connect to the Ingress controller, deploy a test application and verify the access. These steps are similar to the previous install methods.
For more information about NGINX Ingress Controller installation, take a look at the official ingress-nginx installation guide and bare-metal considerations.
"},{"location":"examples/nginx-ingress/#alternative-examples-for-ingress-controllers-on-k0s","title":"Alternative examples for Ingress Controllers on k0s","text":"Traefik Ingress
"},{"location":"examples/rook-ceph/","title":"Installing Ceph Storage with Rook","text":"In this tutorial you'll create a Ceph storage for k0s. Ceph is a highly scalable, distributed storage solution. It offers object, block, and file storage, and it's designed to run on any common hardware. Ceph implements data replication into multiple volumes that makes it fault-tolerant. Another clear advantage of Ceph in Kubernetes is the dynamic provisioning. This means that applications just need to request the storage (persistent volume claim) and Ceph will automatically provision the requested storage without a manual creation of the persistent volume each time.
Unfortunately, the Ceph deployment as such can be considered a bit complex. To make the deployment easier, we'll use the Rook operator. Rook is a CNCF project dedicated to storage orchestration. Rook supports several storage solutions, but in this tutorial we will use it to manage Ceph.
This tutorial uses three worker nodes and one controller. It's possible to use fewer nodes, but using three worker nodes makes it a good example for deploying a highly available storage cluster. We use external storage partitions, which are assigned to the worker nodes to be used by Ceph.
After the Ceph deployment we'll deploy a sample application (MongoDB) to use the storage in practice.
"},{"location":"examples/rook-ceph/#prerequisites","title":"Prerequisites","text":"In this example we'll use Terraform to create four Ubuntu VMs on AWS. Using Terraform makes the VM deployment fast and repeatable. You can avoid manually setting up everything in the AWS GUI. Moreover, when you have finished with the tutorial, it's very easy to tear down the VMs with Terraform (with one command). However, you can set up the nodes in many different ways and it doesn't make a difference in the following steps.
We will use k0sctl to create the k0s cluster. The k0sctl repo also includes a ready-made Terraform configuration to create the VMs on AWS. We'll use that. Let's start by cloning the k0sctl repo.
git clone git@github.com:k0sproject/k0sctl.git\n
Take a look at the Terraform files
cd k0sctl/examples/aws-tf\nls -l\n
Open variables.tf
and set the number of controller and worker nodes like this:
variable \"cluster_name\" {\ntype = string\ndefault = \"k0sctl\"\n}\n\nvariable \"controller_count\" {\ntype = number\ndefault = 1\n}\n\nvariable \"worker_count\" {\ntype = number\ndefault = 3\n}\n\nvariable \"cluster_flavor\" {\ntype = string\ndefault = \"t3.small\"\n}\n
Open main.tf
to check or modify k0s version near the end of the file.
You can also configure a different name to your cluster and change the default VM type. t3.small
(2 vCPUs, 2 GB RAM) runs just fine for this tutorial.
For AWS, you need an account. Terraform will use the following environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN
. You can easily copy-paste them from the AWS portal. For more information, see the AWS documentation.
When the environment variables are set, you can proceed with Terraform and deploy the VMs.
terraform init\nterraform apply\n
If you decide to create the VMs manually using the AWS GUI, you need to disable source/destination checking. This must always be disabled for multi-node Kubernetes clusters in order to get the node-to-node communication working, due to Network Address Translation. For Terraform this is already taken care of in the default configuration.
"},{"location":"examples/rook-ceph/#3-create-and-attach-the-volumes","title":"3. Create and attach the volumes","text":"Ceph requires one of the following storage options for storing the data:
We will be using raw partitions (AWS EBS volumes), which can be easily attached to the worker node VMs. They are automatically detected by Ceph with its default configuration.
Deploy AWS EBS volumes, one for each worker node. You can manually create three EBS volumes (for example 10 GB each) using the AWS GUI and attach those to your worker nodes. Formatting shouldn't be done. Instead, Ceph handles that part automatically.
After you have attached the EBS volumes to the worker nodes, log in to one of the workers and check the available block devices:
lsblk -f\n
NAME FSTYPE LABEL UUID FSAVAIL FSUSE% MOUNTPOINT\nloop0 squashfs 0 100% /snap/amazon-ssm-agent/3552\nloop1 squashfs 0 100% /snap/core18/1997\nloop2 squashfs 0 100% /snap/snapd/11588\nloop3 squashfs 0 100% /snap/lxd/19647\nnvme0n1\n\u2514\u2500nvme0n1p1 ext4 cloudimg-rootfs e8070c31-bfee-4314-a151-d1332dc23486 5.1G 33% /\nnvme1n1\n
The last line (nvme1n1) in this example printout corresponds to the attached EBS volume. Note that it doesn't have any filesystem (FSTYPE is empty). This meets the Ceph storage requirements and you are good to proceed.
"},{"location":"examples/rook-ceph/#4-install-k0s-using-k0sctl","title":"4. Install k0s using k0sctl","text":"You can use terraform to automatically output a config file for k0sctl with the ip addresses and access details.
terraform output -raw k0s_cluster > k0sctl.yaml\n
After that deploying k0s becomes very easy with the ready-made configuration.
k0sctl apply --config k0sctl.yaml\n
It might take around 2-3 minutes for k0sctl to connect to each node, install k0s and connect the nodes together to form a cluster.
"},{"location":"examples/rook-ceph/#5-access-k0s-cluster","title":"5. Access k0s cluster","text":"To access your new cluster remotely, you can use k0sctl to fetch kubeconfig and use that with kubectl or Lens.
k0sctl kubeconfig --config k0sctl.yaml > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\nkubectl get nodes\n
The other option is to log in to your controller node and use the built-in k0s kubectl to access the cluster. Then you don't need to worry about kubeconfig (k0s takes care of that automatically).
ssh -i aws.pem <username>@<ip-address>\nsudo k0s kubectl get nodes\n
"},{"location":"examples/rook-ceph/#6-deploy-rook","title":"6. Deploy Rook","text":"To get started with Rook, let's first clone the Rook GitHub repo:
git clone --single-branch --branch release-1.7 https://github.com/rook/rook.git\ncd rook/cluster/examples/kubernetes/ceph\n
We will use mostly the default Rook configuration. However, the k0s kubelet directory must be configured in operator.yaml like this:
ROOK_CSI_KUBELET_DIR_PATH: \"/var/lib/k0s/kubelet\"\n
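The setting lives in the data section of the rook-ceph-operator-config ConfigMap inside operator.yaml; in context it looks roughly like this (a sketch, other keys omitted):
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: rook-ceph-operator-config\n  namespace: rook-ceph\ndata:\n  # point the CSI driver at the kubelet directory used by k0s\n  ROOK_CSI_KUBELET_DIR_PATH: \"/var/lib/k0s/kubelet\"\n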
To create the resources, which are needed by Rook's Ceph operator, run:
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml\n
Now you should see the operator running. Check it with:
kubectl get pods -n rook-ceph\n
"},{"location":"examples/rook-ceph/#7-deploy-ceph-cluster","title":"7. Deploy Ceph Cluster","text":"Then you can proceed to create a Ceph cluster. Ceph will use the three EBS volumes attached to the worker nodes:
kubectl apply -f cluster.yaml\n
It takes some minutes to prepare the volumes and create the cluster. Once this is completed you should see the following output:
kubectl get pods -n rook-ceph\n
NAME READY STATUS RESTARTS AGE\ncsi-cephfsplugin-nhxc8 3/3 Running 0 2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-ldhjp 6/6 Running 0 2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-sxfm8 6/6 Running 0 2m48s\ncsi-cephfsplugin-tj2bh 3/3 Running 0 2m48s\ncsi-cephfsplugin-z2rrl 3/3 Running 0 2m48s\ncsi-rbdplugin-5q7gq 3/3 Running 0 2m49s\ncsi-rbdplugin-8sfpd 3/3 Running 0 2m49s\ncsi-rbdplugin-f2xdz 3/3 Running 0 2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-g6vck 6/6 Running 0 2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-zpmvr 6/6 Running 0 2m49s\nrook-ceph-crashcollector-ip-172-31-0-76-64cb4c7775-m55x2 1/1 Running 0 45s\nrook-ceph-crashcollector-ip-172-31-13-183-654b46588d-djqsd 1/1 Running 0 2m57s\nrook-ceph-crashcollector-ip-172-31-15-5-67b68698f-gcjb7 1/1 Running 0 2m46s\nrook-ceph-mgr-a-5ffc65c874-8pxgv 1/1 Running 0 58s\nrook-ceph-mon-a-ffcd85c5f-z89tb 1/1 Running 0 2m59s\nrook-ceph-mon-b-fc8f59464-lgczk 1/1 Running 0 2m46s\nrook-ceph-mon-c-69bd87b558-kl4nl 1/1 Running 0 91s\nrook-ceph-operator-54cf7487d4-pl66p 1/1 Running 0 4m57s\nrook-ceph-osd-0-dd4fd8f6-g6s9m 1/1 Running 0 48s\nrook-ceph-osd-1-7c478c49c4-gkqml 1/1 Running 0 47s\nrook-ceph-osd-2-5b887995fd-26492 1/1 Running 0 46s\nrook-ceph-osd-prepare-ip-172-31-0-76-6b5fw 0/1 Completed 0 28s\nrook-ceph-osd-prepare-ip-172-31-13-183-cnkf9 0/1 Completed 0 25s\nrook-ceph-osd-prepare-ip-172-31-15-5-qc6pt 0/1 Completed 0 23s\n
"},{"location":"examples/rook-ceph/#8-configure-ceph-block-storage","title":"8. Configure Ceph block storage","text":"Before Ceph can provide storage to your cluster, you need to create a ReplicaPool and a StorageClass. In this example, we use the default configuration to create the block storage.
kubectl apply -f ./csi/rbd/storageclass.yaml\n
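For orientation, the default storageclass.yaml defines roughly a replicated pool plus a StorageClass along these lines (a sketch; the shipped file contains additional CSI parameters and the defaults may differ between Rook releases):
apiVersion: ceph.rook.io/v1\nkind: CephBlockPool\nmetadata:\n  name: replicapool\n  namespace: rook-ceph\nspec:\n  failureDomain: host\n  replicated:\n    size: 3                 # one replica per worker node\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: rook-ceph-block\nprovisioner: rook-ceph.rbd.csi.ceph.com\nparameters:\n  clusterID: rook-ceph\n  pool: replicapool\nreclaimPolicy: Delete\n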
"},{"location":"examples/rook-ceph/#9-request-storage","title":"9. Request storage","text":"Create a new manifest file mongo-pvc.yaml
with the following content:
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mongo-pvc\nspec:\n  storageClassName: rook-ceph-block\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: 2Gi\n
This will create a PersistentVolumeClaim (PVC) to request 2 GB of block storage from Ceph. Provisioning is done dynamically. You can define the requested size freely, as long as it fits within the available storage.
kubectl apply -f mongo-pvc.yaml\n
You can now check the status of your PVC:
kubectl get pvc\n
When the PVC gets the requested volume reserved (bound), it should look like this:
kubectl get pvc\n
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nmongo-pvc Bound pvc-08337736-65dd-49d2-938c-8197a8871739 2Gi RWO rook-ceph-block 6s\n
"},{"location":"examples/rook-ceph/#10-deploy-an-example-application","title":"10. Deploy an example application","text":"Let's deploy a Mongo database to verify the Ceph storage. Create a new file mongo.yaml
with the following content:
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mongo\nspec:\n  selector:\n    matchLabels:\n      app: mongo\n  template:\n    metadata:\n      labels:\n        app: mongo\n    spec:\n      containers:\n      - image: mongo:4.0\n        name: mongo\n        ports:\n        - containerPort: 27017\n          name: mongo\n        volumeMounts:\n        - name: mongo-persistent-storage\n          mountPath: /data/db\n      volumes:\n      - name: mongo-persistent-storage\n        persistentVolumeClaim:\n          claimName: mongo-pvc\n
Deploy the database:
kubectl apply -f mongo.yaml\n
"},{"location":"examples/rook-ceph/#11-access-the-application","title":"11. Access the application","text":"Open the MongoDB shell using the mongo pod:
kubectl get pods\n
NAME READY STATUS RESTARTS AGE\nmongo-b87cbd5cc-4wx8t 1/1 Running 0 76s\n
kubectl exec -it mongo-b87cbd5cc-4wx8t -- mongo\n
Create a DB and insert some data:
> use testDB\nswitched to db testDB\n> db.testDB.insertOne( {name: \"abc\", number: 123 })\n{\n \"acknowledged\" : true,\n \"insertedId\" : ObjectId(\"60815690a709d344f83b651d\")\n}\n> db.testDB.insertOne( {name: \"bcd\", number: 234 })\n{\n \"acknowledged\" : true,\n \"insertedId\" : ObjectId(\"6081569da709d344f83b651e\")\n}\n
Read the data:
> db.getCollection(\"testDB\").find()\n{ \"_id\" : ObjectId(\"60815690a709d344f83b651d\"), \"name\" : \"abc\", \"number\" : 123 }\n{ \"_id\" : ObjectId(\"6081569da709d344f83b651e\"), \"name\" : \"bcd\", \"number\" : 234 }\n>\n
You can also try to restart the mongo pod or restart the worker nodes to verify that the storage is persistent.
"},{"location":"examples/rook-ceph/#12-clean-up","title":"12. Clean-up","text":"You can use Terraform to take down the VMs:
terraform destroy\n
Remember to delete the EBS volumes separately.
"},{"location":"examples/rook-ceph/#conclusions","title":"Conclusions","text":"You have now created a replicated Ceph storage for k0s. All you data is stored to multiple disks at the same time so you have a fault-tolerant solution. You also have enabled dynamic provisioning. Your applications can request the available storage without a manual creation of the persistent volumes each time.
This was just one example of deploying distributed storage to a k0s cluster using an operator. You can easily use different Kubernetes storage solutions with k0s.
"},{"location":"examples/traefik-ingress/","title":"Installing Traefik Ingress Controller","text":"You can configure k0s with the Traefik ingress controller, a MetalLB service loadbalancer, and deploy the Traefik Dashboard using a service sample. To do this you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml
file during cluster configuration.
Configure k0s to install Traefik and MetalLB during cluster bootstrapping by adding their Helm charts as extensions in the k0s configuration file (k0s.yaml
).
Note:
A good practice is to have a small range of IP addresses that are addressable on your network, preferably outside the assignment pool your DHCP server allocates (though any valid IP range should work locally on your machine). Providing an addressable range allows you to access your load balancer and Ingress services from anywhere on your local network.
extensions:\n  helm:\n    repositories:\n    - name: traefik\n      url: https://traefik.github.io/charts\n    - name: bitnami\n      url: https://charts.bitnami.com/bitnami\n    charts:\n    - name: traefik\n      chartname: traefik/traefik\n      version: \"20.5.3\"\n      namespace: default\n    - name: metallb\n      chartname: bitnami/metallb\n      version: \"2.5.4\"\n      namespace: default\n      values: |2\n        configInline:\n          address-pools:\n          - name: generic-cluster-pool\n            protocol: layer2\n            addresses:\n            - 192.168.0.5-192.168.0.10\n
"},{"location":"examples/traefik-ingress/#2-retrieve-the-load-balancer-ip","title":"2. Retrieve the Load Balancer IP","text":"After you start your cluster, run kubectl get all
to confirm the deployment of Traefik and MetalLB. The command should return a response with the metallb
and traefik
resources, along with a service load balancer that has an assigned EXTERNAL-IP
.
kubectl get all\n
Output:
NAME READY STATUS RESTARTS AGE\npod/metallb-1607085578-controller-864c9757f6-bpx6r 1/1 Running 0 81s\npod/metallb-1607085578-speaker-245c2 1/1 Running 0 60s\npod/traefik-1607085579-77bbc57699-b2f2t 1/1 Running 0 81s\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 96s\nservice/traefik-1607085579 LoadBalancer 10.105.119.102 192.168.0.5 80:32153/TCP,443:30791/TCP 84s\n\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\ndaemonset.apps/metallb-1607085578-speaker 1 1 1 1 1 kubernetes.io/os=linux 87s\n\nNAME READY UP-TO-DATE AVAILABLE AGE\ndeployment.apps/metallb-1607085578-controller 1/1 1 1 87s\ndeployment.apps/traefik-1607085579 1/1 1 1 84s\n\nNAME DESIRED CURRENT READY AGE\nreplicaset.apps/metallb-1607085578-controller-864c9757f6 1 1 1 81s\nreplicaset.apps/traefik-1607085579-77bbc57699 1 1 1 81s\n
Take note of the EXTERNAL-IP
given to the service/traefik-n
load balancer. In this example, 192.168.0.5
has been assigned and can be used to access services via the Ingress proxy:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/traefik-1607085579 LoadBalancer 10.105.119.102 192.168.0.5 80:32153/TCP,443:30791/TCP 84s\n
Receiving a 404 response here is normal, as you've not configured any Ingress resources to respond yet:
curl http://192.168.0.5\n
404 page not found\n
"},{"location":"examples/traefik-ingress/#3-deploy-and-access-the-traefik-dashboard","title":"3. Deploy and access the Traefik Dashboard","text":"With an available and addressable load balancer present on your cluster, now you can quickly deploy the Traefik dashboard and access it from anywhere on your LAN (assuming that MetalLB is configured with an addressable range).
Create the Traefik Dashboard IngressRoute in a YAML file:
apiVersion: traefik.containo.us/v1alpha1\nkind: IngressRoute\nmetadata:\n  name: dashboard\nspec:\n  entryPoints:\n  - web\n  routes:\n  - match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)\n    kind: Rule\n    services:\n    - name: api@internal\n      kind: TraefikService\n
Deploy the resource:
kubectl apply -f traefik-dashboard.yaml\n
Output:
ingressroute.traefik.containo.us/dashboard created\n
At this point you should be able to access the dashboard using the EXTERNAL-IP
that you noted above by visiting http://192.168.0.5/dashboard/
in your browser:
Create a simple whoami
Deployment, Service, and Ingress manifest:
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: whoami-deployment\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: whoami\n  template:\n    metadata:\n      labels:\n        app: whoami\n    spec:\n      containers:\n      - name: whoami-container\n        image: containous/whoami\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: whoami-service\nspec:\n  ports:\n  - name: http\n    targetPort: 80\n    port: 80\n  selector:\n    app: whoami\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: whoami-ingress\nspec:\n  rules:\n  - http:\n      paths:\n      - path: /whoami\n        pathType: Exact\n        backend:\n          service:\n            name: whoami-service\n            port:\n              number: 80\n
Apply the manifests:
kubectl apply -f whoami.yaml\n
Output:
deployment.apps/whoami-deployment created\nservice/whoami-service created\ningress.networking.k8s.io/whoami-ingress created\n
Test the ingress and service:
curl http://192.168.0.5/whoami\n
Output:
Hostname: whoami-deployment-85bfbd48f-7l77c\nIP: 127.0.0.1\nIP: ::1\nIP: 10.244.214.198\nIP: fe80::b049:f8ff:fe77:3e64\nRemoteAddr: 10.244.214.196:34858\nGET /whoami HTTP/1.1\nHost: 192.168.0.5\nUser-Agent: curl/7.68.0\nAccept: */*\nAccept-Encoding: gzip\nX-Forwarded-For: 192.168.0.82\nX-Forwarded-Host: 192.168.0.5\nX-Forwarded-Port: 80\nX-Forwarded-Proto: http\nX-Forwarded-Server: traefik-1607085579-77bbc57699-b2f2t\nX-Real-Ip: 192.168.0.82\n
With the Traefik Ingress Controller it is possible to use 3rd party tools, such as ngrok, to go further and expose your load balancer to the world. In doing this you enable dynamic certificate provisioning through Let's Encrypt, using either cert-manager or Traefik's own built-in ACME provider.
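If you go the cert-manager route, the issuer could be a sketch along these lines (cert-manager must be installed separately; the issuer name and email address are placeholders):
apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: letsencrypt-prod        # hypothetical issuer name\nspec:\n  acme:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: admin@example.com    # placeholder contact address\n    privateKeySecretRef:\n      name: letsencrypt-prod\n    solvers:\n    - http01:\n        ingress:\n          class: traefik        # solve HTTP-01 challenges through the Traefik ingress\n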
"},{"location":"examples/oidc/oidc-cluster-configuration/","title":"OpenID Connect integration","text":"Developers use kubectl
to access Kubernetes clusters. By default kubectl
uses a certificate to authenticate to the Kubernetes API. This means that when multiple developers need to access a cluster, the certificate needs to be shared. Sharing the credentials to access a Kubernetes cluster presents a significant security problem. Compromise of the certificate is very easy and the consequences can be catastrophic.
In this tutorial, we walk through how to set up your Kubernetes cluster to add Single Sign-On support for kubectl using OpenID Connect (OIDC).
"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authentication","title":"OpenID Connect based authentication","text":"OpenID Connect can be enabled by modifying k0s configuration (using extraArgs).
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-overview","title":"Configuring k0s: overview","text":"There are list of arguments for the kube-api that allows us to manage OIDC based authentication
Parameter Description Example Required--oidc-issuer-url
URL of the provider which allows the API server to discover public signing keys. Only URLs which use the https://
scheme are accepted. This is typically the provider's discovery URL without a path, for example \"https://accounts.google.com\" or \"https://login.salesforce.com\". This URL should point to the level below .well-known/openid-configuration If the discovery URL is https://accounts.google.com/.well-known/openid-configuration
, the value should be https://accounts.google.com
Yes --oidc-client-id
A client id that all tokens must be issued for. kubernetes Yes --oidc-username-claim
JWT claim to use as the user name. By default sub
, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as email
or name
, depending on their provider. However, claims other than email
will be prefixed with the issuer URL to prevent naming clashes with other plugins. sub No --oidc-username-prefix
Prefix prepended to username claims to prevent clashes with existing names (such as system:
users). For example, the value oidc:
will create usernames like oidc:jane.doe
. If this flag isn't provided and --oidc-username-claim
is a value other than email
the prefix defaults to ( Issuer URL )#
where ( Issuer URL )
is the value of --oidc-issuer-url
. The value -
can be used to disable all prefixing. oidc:
No --oidc-groups-claim
JWT claim to use as the user's group. If the claim is present it must be an array of strings. groups No --oidc-groups-prefix
Prefix prepended to group claims to prevent clashes with existing names (such as system:
groups). For example, the value oidc:
will create group names like oidc:engineering
and oidc:infra
. oidc:
No --oidc-required-claim
A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims. claim=value
No --oidc-ca-file
The path to the certificate for the CA that signed your identity provider's web certificate. Defaults to the host's root CAs. /etc/kubernetes/ssl/kc-ca.pem
No To set up bare minimum example we need to use:
You will require:
Please, refer to providers configuration guide or your selected OIDC provider's own documentation (we don't cover all of them in k0s docs).
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuration-example","title":"Configuration example","text":"apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\napi:\nextraArgs:\noidc-issuer-url: <issuer-url>\noidc-client-id: <client-id>\noidc-username-claim: email # we use email token claim field as a username\n
Use the configuration as a starting point. Continue with configuration guide for finishing k0s cluster installation.
"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authorisation","title":"OpenID Connect based authorisation","text":"There are two alternative options to implement authorization
"},{"location":"examples/oidc/oidc-cluster-configuration/#provider-based-role-mapping","title":"Provider based role mapping","text":"Please refer to the providers configuration guide. Generally speaking, using the oidc-groups-claim
argument lets you specify which token claim is used as a list of RBAC roles for a given user. You still need to somehow sync that data between your OIDC provider and the kube-api RBAC system.
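As a sketch, assuming your provider exposes a groups claim and you keep the oidc: prefix, the extra kube-api arguments and a matching binding could look like this (the group and binding names are placeholders; the first document goes into the k0s configuration, the second is applied with kubectl):
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n  api:\n    extraArgs:\n      oidc-groups-claim: groups\n      oidc-groups-prefix: \"oidc:\"\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: oidc-developers-view      # hypothetical binding name\nsubjects:\n- kind: Group\n  name: \"oidc:developers\"         # hypothetical group coming from the token claim\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: ClusterRole\n  name: view\n  apiGroup: rbac.authorization.k8s.io\n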
To use manual role management, you will need to create a role and role-binding for each new user within the k0s cluster. The role can be shared by all users. Role example:
---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  namespace: default\n  name: dev-role\nrules:\n- apiGroups: [\"*\"]\n  resources: [\"*\"]\n  verbs: [\"*\"]\n
RoleBinding example:
kind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: dev-role-binding\nsubjects:\n- kind: User\n  name: <provider side user id>\nroleRef:\n  kind: Role\n  name: dev-role\n  apiGroup: rbac.authorization.k8s.io\n
The provided Role example is all-inclusive and should be tuned to your actual requirements.
"},{"location":"examples/oidc/oidc-cluster-configuration/#kubeconfig-management","title":"kubeconfig management","text":"NB: it's not safe to provide full content of the /var/lib/k0s/pki/admin.conf
to the end-user. Instead, create a user specific kubeconfig with limited permissions.
The authorization side of the kubeconfig management is described in provider specific guides. Use /var/lib/k0s/pki/admin.conf
as a template for cluster specific kubeconfig.
OAuth2 spec Kubernetes authorization system (RBAC) Kubernetes authenticating system
"},{"location":"examples/oidc/oidc-provider-configuration/","title":"Providers","text":"We use Google Cloud as a provider for the sake of the example. Check your vendor documentation in case if you use some other vendor.
"},{"location":"examples/oidc/oidc-provider-configuration/#notes-on-stand-alone-providers","title":"Notes on stand-alone providers","text":"If you are using stand-alone OIDC provider, you might need to specify oidc-ca-file
argument for the kube-api.
We use k8s-oidc-helper tool to create proper kubeconfig user record.
The issuer URL for the Google cloud is https://accounts.google.com
Use the command and follow the instructions:
k8s-oidc-helper --client-id=<CLIENT_ID> \\\n--client-secret=<CLIENT_SECRET> \\\n--write=true\n
"},{"location":"examples/oidc/oidc-provider-configuration/#using-kubelogin","title":"Using kubelogin","text":"For other OIDC providers it is possible to use kubelogin
plugin. Please refer to the setup guide for details.
kubelogin
","text":"kubectl oidc-login setup \\\n--oidc-issuer-url=https://accounts.google.com \\\n--oidc-client-id=<CLIENT_ID> \\\n--oidc-client-secret=<CLIENT_SECRET>\n\n kubectl config set-credentials oidc \\\n--exec-api-version=client.authentication.k8s.io/v1beta1 \\\n--exec-command=kubectl \\\n--exec-arg=oidc-login \\\n--exec-arg=get-token \\\n--exec-arg=--oidc-issuer-url=https://accounts.google.com \\\n--exec-arg=--oidc-client-id=<CLIENT_ID> \\\n--exec-arg=--oidc-client-secret=<CLIENT_SECRET>\n
You can switch the current context to oidc.
kubectl config set-context --current --user=oidc
We use mkdocs and mike for publishing docs to docs.k0sproject.io. This guide will provide a simple how-to on how to configure and deploy newly added docs to our website.
"},{"location":"internal/publishing_docs_using_mkdocs/#requirements","title":"Requirements","text":"Install mike: https://github.com/jimporter/mike#installation
"},{"location":"internal/publishing_docs_using_mkdocs/#adding-a-new-link-to-the-navigation","title":"Adding A New link to the Navigation","text":"docs
directory (I.E., changes to the main README.md
are not reflected in the website).nav
in the main mkdocs.yml file:nav:\n- Overview: README.md\n- Creating A Cluster:\n- Quick Start Guide: create-cluster.md\n- Run in Docker: k0s-in-docker.md\n- Single node set-up: k0s-single-node.md\n- Configuration Reference:\n- Architecture: architecture.md\n- Networking: networking.md\n- Configuration Options: configuration.md\n- Using Cloud Providers: cloud-providers.md\n- Running k0s with Traefik: examples/traefik-ingress.md\n- Running k0s as a service: install.md\n- k0s CLI Help Pages: cli/k0s.md\n- Deploying Manifests: manifests.md\n- FAQ: FAQ.md\n- Troubleshooting: troubleshooting.md\n- Contributing:\n- Overview: contributors/overview.md\n- Workflow: contributors/github_workflow.md\n- Testing: contributors/testing.md\n
main
, the \"Publish Docs\" jos will start running: https://github.com/k0sproject/k0s/actions?query=workflow%3A%22Publish+docs+via+GitHub+Pages%22gh-pages
deployment page: https://github.com/k0sproject/k0s/deployments/activity_log?environment=github-pagesWe've got a dockerized setup for easily testing docs locally. Simply run make docs-serve-dev
. The docs will be available on http://localhost:8000.
Note If you have something already running locally on port 8000
you can choose another port like so: make docs-serve-dev DOCS_DEV_PORT=9999
. The docs will then be available on http://localhost:9999.
k0s bundles Kubernetes manifests for Calico. The manifests are retrieved from the official Calico docs.
As fetching and modifying the entire multi-thousand line file is error-prone, you may follow these steps to upgrade Calico to the latest version:
./get-calico.sh
make bindata-manifests
Note: All manual adjustments should be fairly obvious from the git diff. This section attempts to provide a sanity checklist to go through and make sure we still have those changes applied. The code blocks in this section are our modifications, not the calico originals.
To see the diff without CRDs, you can do something like:
git diff ':!static/manifests/calico/CustomResourceDefinition'\n
That'll make it easier to spot any needed changes.
static/manifests/calico/DaemonSet/calico-node.yaml
:
ipip
to find):{{- if eq .Mode \"ipip\" }}\n# Enable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: {{ .Overlay }}\n# Enable or Disable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: \"Never\"\n{{- else if eq .Mode \"vxlan\" }}\n# Disable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: \"Never\"\n# Enable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: {{ .Overlay }}\n- name: FELIX_VXLANPORT\nvalue: \"{{ .VxlanPort }}\"\n- name: FELIX_VXLANVNI\nvalue: \"{{ .VxlanVNI }}\"\n{{- end }}\n
# Auto detect the iptables backend\n- name: FELIX_IPTABLESBACKEND\nvalue: \"auto\"\n
{{- if .EnableWireguard }}\n- name: FELIX_WIREGUARDENABLED\nvalue: \"true\"\n{{- end }}\n
- name: CALICO_IPV4POOL_CIDR\nvalue: \"{{ .ClusterCIDR }}\"\n
# calico-config.yaml\ncalico_backend: \"{{ .Mode }}\"\nveth_mtu: \"{{ .MTU }}\"\n
CLUSTER_TYPE
- name: CLUSTER_TYPE\nvalue: \"k8s\"\n
-bird-ready
and -bird-live
from the readiness and liveness probes respectivelyInstead of hardcoded image names and versions use placeholders to support configuration level settings. Following placeholders are used:
CalicoCNIImage
for calico/cniCalicoNodeImage
for calico/nodeCalicoKubeControllersImage
for calico/kube-controllersAlso, all containers in manifests were modified to have 'imagePullPolicy' field:
imagePullPolicy: {{ .PullPolicy }}\n
Example:
# calico-node.yaml\nimage: {{ .CalicoCNIImage }}\n
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"k0s - The Zero Friction Kubernetes","text":"k0s is an open source, all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster. Due to its simple design, flexible deployment options and modest system requirements, k0s is well suited for
k0s drastically reduces the complexity of installing and running a CNCF certified Kubernetes distribution. With k0s new clusters can be bootstrapped in minutes and developer friction is reduced to zero. This allows anyone with no special skills or expertise in Kubernetes to easily get started.
k0s is distributed as a single binary with zero host OS dependencies besides the host OS kernel. It works with any Linux without additional software packages or configuration. Any security vulnerabilities or performance issues can be fixed directly in the k0s distribution that makes it extremely straightforward to keep the clusters up-to-date and secure.
"},{"location":"#what-happened-to-github-stargazers","title":"What happened to Github stargazers?","text":"In September 2022 we made a human error while creating some build automation scripts&tools for the Github repository. Our automation accidentally changed the repo to a private one for few minutes. That itself is not a big deal and everything was restored quickly. But the nasty side effect is that it also removed all the stargazers at that point. :(
Before that mishap we had 4776 stargazers, making k0s one of the most popular Kubernetes distros out there.
So if you are reading this and have not yet starred the k0s repo, we would highly appreciate the :star: to get our numbers closer to what they used to be.
"},{"location":"#key-features","title":"Key Features","text":"Quick Start Guide for creating a full Kubernetes cluster with a single node.
"},{"location":"#demo","title":"Demo","text":""},{"location":"#community-support","title":"Community Support","text":"We welcome your help in building k0s! If you are interested, we invite you to check out the Contributing Guide and the Code of Conduct.
"},{"location":"#commercial-support","title":"Commercial Support","text":"Mirantis offers technical support, professional services and training for k0s. The support subscriptions include, for example, prioritized support (Phone, Web, Email) and access to verified extensions on top of your k0s cluster.
For any k0s inquiries, please contact us via email info@k0sproject.io.
"},{"location":"CODE_OF_CONDUCT/","title":"k0s Community Code Of Conduct","text":"Please refer to our contributor code of conduct.
"},{"location":"FAQ/","title":"Frequently asked questions","text":""},{"location":"FAQ/#how-is-k0s-pronounced","title":"How is k0s pronounced?","text":"kay-zero-ess
"},{"location":"FAQ/#how-do-i-run-a-single-node-cluster","title":"How do I run a single node cluster?","text":"The cluster can be started with:
k0s controller --single\n
See also the Getting Started tutorial.
"},{"location":"FAQ/#how-do-i-connect-to-the-cluster","title":"How do I connect to the cluster?","text":"You find the config in ${DATADIR}/pki/admin.conf
(default: /var/lib/k0s/pki/admin.conf
). Copy this file, and change the localhost
entry to the public ip of the controller. Use the modified config to connect with kubectl:
export KUBECONFIG=/path/to/admin.conf\nkubectl ...\n
"},{"location":"FAQ/#why-doesnt-kubectl-get-nodes-list-the-k0s-controllers","title":"Why doesn't kubectl get nodes
list the k0s controllers?","text":"As a default, the control plane does not run kubelet at all, and will not accept any workloads, so the controller will not show up on the node list in kubectl. If you want your controller to accept workloads and run pods, you do so with: k0s controller --enable-worker
(recommended only as test/dev/POC environments).
Yes, k0sproject is 100% open source. The source code is under Apache 2 and the documentation is under the Creative Commons License. Mirantis, Inc. is the main contributor and sponsor for this OSS project: building all the binaries from upstream, performing necessary security scans and calculating checksums so that it's easy and safe to use. The use of these ready-made binaries are subject to Mirantis EULA and the binaries include only open source software.
"},{"location":"airgap-install/","title":"Airgap install","text":"You can install k0s in an environment with restricted Internet access. Airgap installation requires an image bundle, which contains all the needed container images. There are two options to get the image bundle:
In order to create your own image bundle, you need
ctr
, installed on the worker machine (refer to the ContainerD getting-started guide).k0s/containerd uses OCI (Open Container Initiative) bundles for airgap installation. OCI bundles must be uncompressed. As OCI bundles are built specifically for each architecture, create an OCI bundle that uses the same processor architecture (x86-64, ARM64, ARMv7) as on the target system.
k0s offers two methods for creating OCI bundles, one using Docker and the other using a previously set up k0s worker. Be aware, though, that you cannot use the Docker method for the ARM architectures due to kube-proxy image multiarch manifest problem.
Note: k0s strictly matches image architecture, e.g. arm/v7 images won't work for arm64.
"},{"location":"airgap-install/#docker","title":"Docker","text":"Pull the images.
k0s airgap list-images | xargs -I{} docker pull {}\n
Create a bundle.
docker image save $(k0s airgap list-images | xargs) -o bundle_file\n
As containerd pulls all the images during the k0s worker normal bootstrap, you can use it to build the OCI bundle with images.
Use the following commands on a machine with an installed k0s worker:
ctr --namespace k8s.io \\\n--address /run/k0s/containerd.sock \\\nimages export bundle_file $(k0s airgap list-images | xargs)\n
"},{"location":"airgap-install/#2a-sync-the-bundle-file-with-the-airgapped-machine-locally","title":"2a. Sync the bundle file with the airgapped machine (locally)","text":"Copy the bundle_file
you created in the previous step or downloaded from the releases page to the target machine into the images
directory in the k0s data directory. Copy the bundle only to the worker nodes. Controller nodes don't use it.
# mkdir -p /var/lib/k0s/images\n# cp bundle_file /var/lib/k0s/images/bundle_file\n
"},{"location":"airgap-install/#2b-sync-the-bundle-file-with-the-airgapped-machines-remotely-with-k0sctl","title":"2b. Sync the bundle file with the airgapped machines (remotely with k0sctl)","text":"As an alternative to the previous step, you can use k0sctl to upload the bundle file to the worker nodes. k0sctl can also be used to upload k0s binary file to all nodes. Take a look at this example (k0sctl.yaml) with one controller and one worker node to upload the bundle file and k0s binary:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: 1.28.1+k0s.0\nhosts:\n- role: controller\nssh:\naddress: <controller-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\n\n# uploadBinary: <boolean>\n# When true the k0s binaries are cached and uploaded\n# from the host running k0sctl instead of downloading\n# directly to the target host.\nuploadBinary: true\n\n# k0sBinaryPath: <local filepath>\n# Upload a custom or manually downloaded k0s binary\n# from a local path on the host running k0sctl to the\n# target host.\n# k0sBinaryPath: path/to/k0s_binary/k0s\n\n- role: worker\nssh:\naddress: <worker-ip-address>\nuser: ubuntu\nkeyPath: /path/.ssh/id_rsa\nuploadBinary: true\nfiles:\n# This airgap bundle file will be uploaded from the k0sctl\n# host to the specified directory on the target host\n- src: /local/path/to/bundle-file/airgap-bundle-amd64.tar\ndstDir: /var/lib/k0s/images/\nperm: 0755\n
"},{"location":"airgap-install/#3-ensure-pull-policy-in-the-k0syaml-optional","title":"3. Ensure pull policy in the k0s.yaml (optional)","text":"Use the following k0s.yaml
to ensure that containerd does not pull images for k0s components from the Internet at any time.
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nimages:\ndefault_pull_policy: Never\n
"},{"location":"airgap-install/#4-set-up-the-controller-and-worker-nodes","title":"4. Set up the controller and worker nodes","text":"Refer to the Manual Install for information on setting up the controller and worker nodes locally. Alternatively, you can use k0sctl.
Note: During the worker start up k0s imports all bundles from the $K0S_DATA_DIR/images
before starting kubelet
.
Note: As k0s is a new and dynamic project, the product architecture may occasionally outpace the documentation. The high level concepts and patterns, however, should always apply.
"},{"location":"architecture/#packaging","title":"Packaging","text":"The k0s package is a single, self-extracting binary that embeds Kubernetes binaries, the benefits of which include:
As a single binary, k0s acts as the process supervisor for all other control plane components. As such, there is no container engine or kubelet running on controllers by default, which thus means that a cluster user cannot schedule workloads onto controller nodes.
Using k0s you can create, manage, and configure each of the components, running each as a \"naked\" process. Thus, there is no container engine running on the controller node.
"},{"location":"architecture/#storage","title":"Storage","text":"Kubernetes control plane typically supports only etcd as the datastore. k0s, however, supports many other datastore options in addition to etcd, which it achieves by including kine. Kine allows the use of a wide variety of backend data stores, such as MySQL, PostgreSQL, SQLite, and dqlite (refer to the spec.storage
documentation).
In the case of k0s managed etcd, k0s manages the full lifecycle of the etcd cluster. For example, by joining a new controller node with k0s controller \"long-join-token\"
k0s atomatically adjusts the etcd cluster membership info to allow the new member to join the cluster.
Note: k0s cannot shrink the etcd cluster. As such, to shut down the k0s controller on a node that node must first be manually removed from the etcd cluster.
"},{"location":"architecture/#worker-node","title":"Worker node","text":"As with the control plane, with k0s you can create and manage the core worker components as naked processes on the worker node.
By default, k0s workers use containerd as a high-level runtime and runc as a low-level runtime. Custom runtimes are also supported, refer to Using custom CRI runtime.
"},{"location":"autopilot-multicommand/","title":"Multi-Command Plans","text":"Autopilot relies on a Plan for defining the Commands that should be executed, the Signal Nodes that each should be run on, and the status of each Command.
A Plan:
A Command:
A Signal Node:
The execution of a Plan is the result of processing Commands through a number of Processing States.
When a Plan is executed, each of the Commands are executed in the order of their appearance in the Plan.
The progress and state of each Command is recorded in the Plan status.
1
, and so does its status.The following is an example of a Plan that has been applied as is currently being processed by autopilot.
(line numbers added for commentary below)
1: apiVersion: autopilot.k0sproject.io/v1beta2\n2: kind: Plan\n3: metadata:\n4: annotations:\n5: <omitted>\n6: spec:\n7: commands:\n8: - airgapupdate:\n9: version: v1.28.1+k0s.0\n10: platforms:\n11: linux-amd64:\n12: url: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-airgap-bundle-v1.28.1+k0s.0-amd64\n13: workers:\n14: discovery:\n15: static:\n16: nodes:\n17: - worker0\n18: - k0supdate:\n19: version: v1.28.1+k0s.0\n20: platforms:\n21: linux-amd64:\n22: url: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-amd64\n23: targets:\n24: controllers:\n25: discovery:\n26: static:\n27: nodes:\n28: - controller0\n29: workers:\n30: discovery:\n31: static:\n32: nodes:\n33: - worker0\n34: id: id123\n35: timestamp: now\n36: status:\n37: commands:\n38: - airgapupdate:\n39: workers:\n40: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n41: name: worker0\n42: state: SignalSent\n43: id: 0\n44: state: SchedulableWait\n45: - id: 1\n46: k0supdate:\n47: controllers:\n48: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n49: name: controller0\n50: state: SignalPending\n51: workers:\n52: - lastUpdatedTimestamp: \"2022-05-11T19:13:02Z\"\n53: name: worker0\n54: state: SignalPending\n55: state: SchedulableWait\n56: state: SchedulableWait\n
airgapupdate
and k0supdate
.The state of this Plan exerpt is that autopilot has successfully processed the Plan, and has begun processing the airgapupdate
Command. Its status indicates SignalSent which means that the Signal Node has been sent signaling information to perform an airgap update.
The following are the various states that both Plan
s and Command
s adhere to.
stateDiagram-v2\n [*]-->NewPlan\n NewPlan-->SchedulableWait\n NewPlan-->Errors***\n\n SchedulableWait-->Schedulable\n SchedulableWait-->Completed\n Schedulable-->SchedulableWait\n\n Errors***-->[*]\n Completed-->[*]
Note that the Errors state is elaborated in detail below in Error States*.
"},{"location":"autopilot-multicommand/#newplan","title":"NewPlan","text":"When a Plan is created with the name autopilot
, the NewPlan state processing takes effect.
It is the responsibility of NewPlan to ensure that the status of all the Commands are represented in the Plan status. This Plan status is needed at later points in Plan processing to determine if the entire Plan is completed.
The main difference between NewPlan and all the other states is that NewPlan will iterate over all commands; the other states deal with the active command.
"},{"location":"autopilot-multicommand/#schedulablewait","title":"SchedulableWait","text":"Used to evaluate a Command to determine if it can be scheduled for processing. If the Command is determined that it can be processed, the state is set to Schedulable.
"},{"location":"autopilot-multicommand/#schedulable","title":"Schedulable","text":"The Schedulable state is set by SchedulableWait to indicate that this command should execute. The execution of a Command in this state will be whichever logic is defined by the Command.
The ending of this state should either transition to SchedulableWait for further processing + completion detection, or transition to an error.
"},{"location":"autopilot-multicommand/#completed","title":"Completed","text":"The Completed state indicates that the command has finished processing. Once a plan/command are in the Completed state, no further processing will occur on this plan/command.
"},{"location":"autopilot-multicommand/#error-states","title":"Error States","text":"When a plan or command processing goes into one of the designated error states, this is considered fatal and the plan/command processing will terminate.
Error states are generally defined by the Command implementation. The core autopilot functionality is only interested when in the 4 core states (NewPlan, SchedulableWait, Schedulable, Completed), and treats all other states as an error.
flowchart TD\n Errors --> InconsistentTargets\n Errors --> IncompleteTargets\n Errors --> Restricted\n Errors --> MissingPlatform\n Errors --> MissingSignalNode
Error State Command States Description InconsistentTargets k0supdate
Schedulable Indicates that a Signal Node probe has failed for any node that was previously discovered during NewPlan. IncompleteTargets airgapupdate
, k0supdate
NewPlan, Schedulable Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no ControlNode
or Node
object) Restricted airgapupdate
, k0supdate
NewPlan Indicates that a Plan has requested an update of a Signal Node type that contradicts the startup exclusions (the --exclude-from-plans
argument) MissingSignalNode airgapupdate
, k0supdate
Schedulable Indicates that a Signal Node that existed during the discover phase in NewPlan no longer exists (ie. no matching ControlNode
or Node
object)"},{"location":"autopilot-multicommand/#sequence-example","title":"Sequence: Example","text":"Using the example above as a reference, this outlines the basic sequence of events of state transitions to the operations performed on each object.
sequenceDiagram\n PlanStateHandler->>+AirgapUpdateCommand: State: NewPlan\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.NewPlan() -- >SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: NewPlan\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.NewPlan() --> SchedulableWait\n Note over PlanStateHandler,SignalNode(worker0): NewPlan Finished / All Commands\n\n PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+AirgapUpdateCommand: State: Schedulable\n AirgapUpdateCommand->>-SignalNode(worker0): signal_v2(airgap-data) --> SchedulableWait\n PlanStateHandler->>+AirgapUpdateCommand: State: SchedulableWait\n AirgapUpdateCommand->>-AirgapUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(worker0): AirgapUpdate Finished / worker0\n\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n K0sUpdateCommand->>-SignalNode(controller0): signal_v2(k0s-data) --> SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(controller0): K0sUpdate Finished / controller0\n\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Schedulable\n PlanStateHandler->>+K0sUpdateCommand: State: Schedulable\n K0sUpdateCommand->>-SignalNode(worker0): signal_v2(k0s-data) --> SchedulableWait\n PlanStateHandler->>+K0sUpdateCommand: State: SchedulableWait\n K0sUpdateCommand->>-K0sUpdateCommand: cmd.SchedulableWait() --> Completed\n Note over PlanStateHandler,SignalNode(worker0): K0sUpdate Finished / worker0\n\n PlanStateHandler->>PlanStateHandler: Completed
"},{"location":"autopilot/","title":"Autopilot","text":"A tool for updating your k0s
controller and worker nodes using specialized plans. There is a public update-server hosted on the same domain as the documentation site. See the example below on how to use it. There is only a single channel edge_release
available. The channel exposes the latest released version.
In brief:

- You describe the update in a Plan YAML: the update payload (the new version of k0s, URLs for each platform, etc.) and how the update gets applied to the nodes.
- Applying the Plan is a simple kubectl apply operation.
- The applied Plan provides a status that details the progress.

To enable automatic updates, create an UpdateConfig object:
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdateConfig\nmetadata:\nname: example\nnamespace: default\nspec:\nchannel: edge_release\nupdateServer: https://docs.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n
"},{"location":"autopilot/#safeguards","title":"Safeguards","text":"There are a number of safeguards in place to avoid breaking a cluster.
"},{"location":"autopilot/#stateless-component","title":"Stateless Component","text":"Plan
is applied that has both controller and worker nodes, all of the controller nodes will be updated first. It is only when all controllers have updated successfully that worker nodes will receive their update instructions.Plan
, autopilot evaluates all of the controllers and workers that should be included into the Plan
, and tracks them in the status. After this point, no additional changes to the plan (other than status) will be recognized.selector
discovery method no longer exist by the time the update is ready to be scheduled./ready
/ready
will the current controller get sent update signaling.Plan
transitions into an InconsistentTargets
state, and the Plan
execution ends.update
object payload can provide an optional sha256
hash of the update content (specified in url
), which is compared against the update content after it downloads.Autopilot relies on a Plan
object for its instructions on what to update.
Here is an arbitrary Autopilot plan:
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: Plan\nmetadata:\nname: autopilot\n\nspec:\nid: id1234\ntimestamp: now\n\ncommands:\n- k0supdate:\nversion: v1.28.1+k0s.0\nplatforms:\nlinux-amd64:\nurl: https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-amd64\nsha256: '0000000000000000000000000000000000000000000000000000000000000000'\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#core-fields","title":"Core Fields","text":""},{"location":"autopilot/#apiversion-string-required","title":"apiVersion <string> (required)
","text":"v1beta2
, with a full group-version of autopilot.k0sproject.io/v1beta2
metadata.name <string> (required)
","text":"autopilot
spec.id <string> (optional)
","text":"spec.timestamp <string> (optional)
","text":"spec.commands[] (required)
","text":"commands
contains all of the commands that should be performed as a part of the plan.k0supdate
Command","text":""},{"location":"autopilot/#speccommandsk0supdateversion-string-required","title":"spec.commands[].k0supdate.version <string> (required)
","text":"spec.commands[].k0supdate.platforms.*.url <string> (required)
","text":"$GOOS
and $GOARCH
, separated by a hyphen (-
)linux-amd64
, linux-arm64
, linux-arm
linux
. Autopilot may work on other platforms, however this has not been tested.spec.commands[].k0supdate.platforms.*.sha256 <string> (optional)
","text":"spec.commands[].k0supdate.targets.controllers <object> (optional)
","text":"controllers
should be updated.spec.commands[].k0supdate.targets.controllers.limits.concurrent <int> (fixed as 1)
","text":"1
.spec.commands[].k0supdate.targets.workers <object> (optional)
","text":"workers
should be updated.spec.commands[].k0supdate.targets.workers.limits.concurrent <int> (optional, default = 1)
","text":"concurrent
value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1
is assumed.airgapupdate
Command","text":""},{"location":"autopilot/#speccommandsairgapupdateversion-string-required","title":"spec.commands[].airgapupdate.version <string> (required)
","text":"spec.commands[].airgapupdate.platforms.*.url <string> (required)
","text":"$GOOS
and $GOARCH
, separated by a hyphen (-
)linux-amd64
, linux-arm64
, linux-arm
linux
. Autopilot may work on other platforms, however this has not been tested.spec.commands[].airgapupdate.platforms.*.sha256 <string> (optional)
","text":"spec.commands[].airgapupdate.targets.workers <object> (optional)
","text":"workers
should be updated.spec.commands[].airgapupdate.targets.workers.limits.concurrent <int> (optional, default = 1)
","text":"concurrent
value for worker targets will allow for that number of workers to be updated at a time. If no value is provided, 1
is assumed.This defines the static
discovery method used for this set of targets (controllers
, workers
). The static
discovery method relies on a fixed set of hostnames defined in .nodes
.
It is expected that a Node
(workers) or ControlNode
(controllers) object exists with the same name.
static:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoverystaticnodes-string-required-for-static","title":"spec.commands[].k0supdate.targets.*.discovery.static.nodes[] <string> (required for static)
","text":"controllers
, workers
).The selector
target discovery method relies on a dynamic query to the Kubernetes API using labels and fields to produce a set of hosts that should be updated.
Providing both labels
and fields
in the selector
definition will result in a logical AND
of both operands.
selector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
Specifying an empty selector will result in all nodes being selected for this target set.
selector: {}\n
"},{"location":"autopilot/#speccommandsk0supdatetargetsdiscoveryselectorlabels-string-optional","title":"spec.commands[].k0supdate.targets.*.discovery.selector.labels <string> (optional)
","text":"spec.commands[].k0supdate.targets.*.discovery.selector.fields <string> (optional)
","text":"metadata.name
is available as a query field.After a Plan
has been applied, its progress can be viewed in the .status
of the autopilot
Plan.
kubectl get plan autopilot -oyaml\n
An example of a Plan
status:
status:\nstate: SchedulableWait\ncommands:\n- state: SchedulableWait\nk0supdate:\ncontrollers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:44Z\"\nname: controller0\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller1\nstate: SignalCompleted\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: controller2\nstate: SignalPending\nworkers:\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker0\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker1\nstate: SignalPending\n- lastUpdatedTimestamp: \"2022-04-07T15:52:24Z\"\nname: worker2\nstate: SignalPending\n
To read this status, this indicates that:
SchedulableWait
, meaning that autopilot is waiting for the next opportunity to process a command.SignalCompleted
successfullySignalPending
)SignalPending
)The Plan
status at .status.state represents the overall status of the autopilot update operation. There are a number of statuses available:
represents the overall status of the autopilot update operation. There are a number of statuses available:
IncompleteTargets
There are nodes in the resolved Plan
that do not have associated Node
(worker) or ControlNode
(controller) objects. Yes InconsistentTargets
A controller has reported itself as not-ready during the selection of the next controller to update. Yes Schedulable
Indicates that the Plan
can be re-evaluated to determine which next node to update. No SchedulableWait
Scheduling operations are in progress, and no further update scheduling should occur. No Completed
The Plan
has run successfully to completion. Yes Restricted
The Plan
included node types (controller or worker) that violates the --exclude-from-plans
restrictions. Yes"},{"location":"autopilot/#node-status","title":"Node Status","text":"Similar to the Plan Status, the individual nodes can have their own statuses:
Status DescriptionSignalPending
The node is available and awaiting an update signal SignalSent
Update signaling has been successfully applied to this node. MissingPlatform
This node is a platform that an update has not been provided for. MissingSignalNode
This node does not have an associated Node
(worker) or ControlNode
(controller) object."},{"location":"autopilot/#updateconfig","title":"UpdateConfig","text":""},{"location":"autopilot/#updateconfig-core-fields","title":"UpdateConfig Core Fields","text":""},{"location":"autopilot/#apiversion-string-required-field","title":"apiVersion <string> (required field)
","text":"v1beta2
, with a full group-version of autopilot.k0sproject.io/v1beta2
metadata.name <string> (required field)
","text":"spec.channel <string> (optional)
","text":"stable
(default), unstable
.spec.updateServer <string> (optional)
","text":"spec.upgradeStrategy.cron <string> (optional)
","text":"spec.planSpec <string> (optional)
","text":"Plan
apiVersion: autopilot.k0sproject.io/v1beta2\nkind: UpdaterConfig\nmetadata:\nname: example\nspec:\nchannel: stable\nupdateServer: https://updates.k0sproject.io/\nupgradeStrategy:\ncron: \"0 12 * * TUE,WED\" # Check for updates at 12:00 on Tuesday and Wednesday.\n# Optional. Specifies a created Plan object\nplanSpec:\ncommands:\n- k0supdate: # optional\nforceupdate: true # optional\ntargets:\ncontrollers:\ndiscovery:\nstatic:\nnodes:\n- ip-172-31-44-131\n- ip-172-31-42-134\n- ip-172-31-39-65\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\nairgapupdate: # optional\nworkers:\nlimits:\nconcurrent: 5\ndiscovery:\nselector:\nlabels: environment=staging\nfields: metadata.name=worker2\n
"},{"location":"autopilot/#faq","title":"FAQ","text":""},{"location":"autopilot/#q-how-do-i-apply-the-plan-and-controlnode-crds","title":"Q: How do I apply the Plan
and ControlNode
CRDs?","text":"A: These CRD definitions are embedded in the autopilot binary and applied on startup. No additional action is needed.
"},{"location":"autopilot/#q-how-will-controlnode-instances-get-removed","title":"Q: How willControlNode
instances get removed?","text":"A: ControlNode
instances are created by autopilot controllers as they startup. When controllers disappear, they will not remove their associated ControlNode
instance. It is the responsibility of the operator/administrator to ensure their maintenance.
You probably upgraded your workers to an API version greater than what is available on the API server.
https://kubernetes.io/releases/version-skew-policy/
Make sure that your controllers are at the desired version first before upgrading workers.
"},{"location":"backup/","title":"Backup/Restore overview","text":"k0s has integrated support for backing up cluster state and configuration. The k0s backup utility is aiming to back up and restore k0s managed parts of the cluster.
The backups created by k0s backup
command include the following parts of your cluster:
<data-dir>/pki
directory)<data-dir>/manifests
<data-dir>/images
Parts NOT covered by the backup utility:
<data-dir>/manifests
)Any of the backup/restore related operations MUST be performed on the controller node.
"},{"location":"backup/#backuprestore-a-k0s-node-locally","title":"Backup/restore a k0s node locally","text":""},{"location":"backup/#backup-local","title":"Backup (local)","text":"To create backup run the following command on the controller node:
k0s backup --save-path=<directory>\n
The directory used for the save-path
value must exist and be writable. The default value is the current working directory. The command provides backup archive using following naming convention: k0s_backup_<ISODatetimeString>.tar.gz
Because of the DateTime usage, it is guaranteed that none of the previously created archives would be overwritten.
To output the backup archive to stdout, use -
as the save path.
To restore cluster state from the archive use the following command on the controller node:
k0s restore /tmp/k0s_backup_2021-04-26T19_51_57_000Z.tar.gz\n
The command would fail if the data directory for the current controller has overlapping data with the backup archive content.
The command would use the archived k0s.yaml
as the cluster configuration description.
In case if your cluster is HA, after restoring single controller node, join the rest of the controller nodes to the cluster. E.g. steps for N nodes cluster would be:
To read the backup archive from stdin, use -
as the file path.
By using -
as the save or restore path, it is possible to pipe the backup archive through an encryption utility such as GnuPG or OpenSSL.
Note that unencrypted data will still briefly exist as temporary files on the local file system during the backup archvive generation.
"},{"location":"backup/#encrypting-backups-using-gnupg","title":"Encrypting backups using GnuPG","text":"Follow the instructions for your operating system to install the gpg
command if it is not already installed.
This tutorial only covers the bare minimum for example purposes. For secure key management practices and advanced usage refer to the GnuPG user manual.
To generate a new key-pair, use:
gpg --gen-key\n
The key will be stored in your key ring.
gpg --list-keys\n
This will output a list of keys:
/home/user/.gnupg/pubring.gpg\n------------------------------\npub 4096R/BD33228F 2022-01-13\nuid Example User <user@example.com>\nsub 4096R/2F78C251 2022-01-13\n
To export the private key for decrypting the backup on another host, note the key ID (\"BD33228F\" in this example) in the list and use:
gpg --export-secret-keys --armor BD33228F > k0s.key\n
To create an encrypted k0s backup:
k0s backup --save-path - | gpg --encrypt --recipient user@example.com > backup.tar.gz.gpg\n
"},{"location":"backup/#restoring-encrypted-backups-using-gnupg","title":"Restoring encrypted backups using GnuPG","text":"You must have the private key in your gpg keychain. To import the key that was exported in the previous example, use:
gpg --import k0s.key\n
To restore the encrypted backup, use:
gpg --decrypt backup.tar.gz.gpg | k0s restore -\n
"},{"location":"backup/#backuprestore-a-k0s-cluster-using-k0sctl","title":"Backup/restore a k0s cluster using k0sctl","text":"With k0sctl you can perform cluster level backup and restore remotely with one command.
"},{"location":"backup/#backup-remote","title":"Backup (remote)","text":"To create backup run the following command:
k0sctl backup\n
k0sctl connects to the cluster nodes to create a backup. The backup file is stored in the current working directory.
"},{"location":"backup/#restore-remote","title":"Restore (remote)","text":"To restore cluster state from the archive use the following command:
k0sctl apply --restore-from /path/to/backup_file.tar.gz\n
The control plane load balancer address (externalAddress) needs to remain the same between backup and restore. This is caused by the fact that all worker node components connect to this address and cannot currently be re-configured.
"},{"location":"cis_benchmark/","title":"Kube-bench Security Benchmark","text":"Kube-bench is an open source tool which can be used to verify security best practices as defined in CIS Kubernetes Benchmark. It provides a number of tests to help harden your k0s clusters. By default, k0s will pass Kube-bench benchmarks with some exceptions, which are shown below.
"},{"location":"cis_benchmark/#run","title":"Run","text":"Follow the Kube-bench quick start instructions.
After installing the Kube-bench on the host that is running k0s
cluster run the following command:
kube-bench run --config-dir docs/kube-bench/cfg/ --benchmark k0s-1.0\n
"},{"location":"cis_benchmark/#summary-of-disabled-checks","title":"Summary of disabled checks","text":""},{"location":"cis_benchmark/#master-node-security-configuration","title":"Master Node Security Configuration","text":"The current configuration has in total 8 master checks disabled:
id: 1.2.10 - EventRateLimit requires external yaml config. It is left for the users to configure it
type: skip\ntext: \"Ensure that the admission control plugin EventRateLimit is set (Manual)\"\n
id: 1.2.12 - By default this isn't passed to the apiserver for air-gap functionality
type: skip\ntext: \"Ensure that the admission control plugin AlwaysPullImages is set (Manual)\"\n
id: 1.2.22 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-path argument is set (Automated)\"\n
id: 1.2.23 - For sake of simplicity of k0s all audit configuration are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)\"\n
id: 1.2.24 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)\"\n
id: 1.2.25 - For sake of simplicity of k0s all audit configurations are skipped. It is left for the users to configure it
type: skip\ntext: \"Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)\"\n
id: 1.2.33 - By default it is not enabled. Left for the users to decide
type: skip\ntext: \"Ensure that the --encryption-provider-config argument is set as appropriate (Manual)\"\n
id: 1.2.34 - By default it is not enabled. Left for the users to decide
type: skip\ntext: \"Ensure that encryption providers are appropriately configured (Manual)\"\n
and 4 node checks disabled:
id: 4.1.1 - not applicable since k0s does not use kubelet service file
type: skip\ntext: \"Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)\"\n
id: 4.1.2 - not applicable since k0s does not use kubelet service file
type: skip\ntext: \"Ensure that the kubelet service file ownership is set to root:root (Automated)\"\n
id: 4.2.6 - k0s does not set this. See https://github.com/kubernetes/kubernetes/issues/66693
type: skip\ntext: \"Ensure that the --protect-kernel-defaults argument is set to true (Automated)\"\n
id: 4.2.10 - k0s doesn't set this up because certs get auto rotated
type: skip\ntext: \"Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)\"\n
3 checks for the control plane:
id: 3.1.1 - For purpose of being fully automated k0s is skipping this check
type: skip\ntext: \"Client certificate authentication should not be used for users (Manual)\"\n
id: 3.2.1 - out-of-the box configuration does not have any audit policy configuration but users can customize it in spec.api.extraArgs section of the config
type: skip\ntext: \"Ensure that a minimal audit policy is created (Manual)\"\n
id: 3.2.2 - Same as previous
type: skip\ntext: \"Ensure that the audit policy covers key security concerns (Manual)\"\n
Policy checks are also disabled. The checks are manual and are up to the end user to decide on them.
"},{"location":"cloud-providers/","title":"Cloud providers","text":"k0s builds Kubernetes components in providerless mode, meaning that cloud providers are not built into k0s-managed Kubernetes components. As such, you must externally configure the cloud providers to enable their support in your k0s cluster (for more information on running Kubernetes with cloud providers, refer to the Kubernetes documentation.
"},{"location":"cloud-providers/#external-cloud-providers","title":"External Cloud Providers","text":""},{"location":"cloud-providers/#enable-cloud-provider-support-in-kubelet","title":"Enable cloud provider support in kubelet","text":"Even when all components are built with providerless mode, you must be able to enable cloud provider mode for kubelet. To do this, run the workers with --enable-cloud-provider=true
.
When deploying with k0sctl, you can add this into the installFlags
of worker hosts.
spec:\nhosts:\n- ssh:\naddress: 10.0.0.1\nuser: root\nkeyPath: ~/.ssh/id_rsa\ninstallFlags:\n- --enable-cloud-provider\n- --kubelet-extra-args=\"--cloud-provider=external\"\nrole: worker\n
"},{"location":"cloud-providers/#deploy-the-cloud-provider","title":"Deploy the cloud provider","text":"The easiest way to deploy cloud provider controllers is on the k0s cluster.
Use the built-in manifest deployer built into k0s to deploy your cloud provider as a k0s-managed stack. Next, just drop all required manifests into the /var/lib/k0s/manifests/aws/
directory, and k0s will handle the deployment.
Note: The prerequisites for the various cloud providers can vary (for example, several require that configuration files be present on all of the nodes). Refer to your chosen cloud provider's documentation as necessary.
"},{"location":"cloud-providers/#k0s-cloud-provider","title":"k0s Cloud Provider","text":"Alternatively, k0s provides its own lightweight cloud provider that can be used to statically assign ExternalIP
values to worker nodes via Kubernetes annotations. This is beneficial for those who need to expose worker nodes externally via static IP assignments.
To enable this functionality, add the parameter --enable-k0s-cloud-provider=true
to all controllers, and --enable-cloud-provider=true
to all workers.
Adding a static IP address to a node using kubectl
:
kubectl annotate \\\nnode <node> \\\nk0sproject.io/node-ip-external=<external IP>\n
Both IPv4 and IPv6 addresses are supported.
"},{"location":"cloud-providers/#defaults","title":"Defaults","text":"The default node refresh interval is 2m
, which can be overridden using the --k0s-cloud-provider-update-frequency=<duration>
parameter when launching the controller(s).
The default port that the cloud provider binds to can be overridden using the --k0s-cloud-provider-port=<int>
parameter when launching the controller(s).
Commercial support for k0s if offered by Mirantis Inc..
Mirantis can provide various different levels of support starting from DevCare (9-to-5) all the way to OpsCare+ with fully managed service.
On top of our normal release and support model our commercial customers have access to critical security patches even for released versions that fall outside of the Open Source maintained releases.1 Commercial support also includes support for k0s related tooling such as k0sctl.
If you are interested in commercial support for k0s check out our support description and please contact us for further details.
This is assuming there is a compatible release of upstream project with the fix\u00a0\u21a9
k0s command-line interface has the ability to validate config syntax:
k0s validate config --config path/to/config/file\n
validate config
sub-command can validate the following:
k0s can be installed without a config file. In that case the default configuration will be used. You can, though, create and run your own non-default configuration (used by the k0s controller nodes).
k0s supports providing only partial configurations. In case of partial configuration is provided, k0s will use the defaults for any missing values.
Generate a yaml config file that uses the default settings.
mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
Modify the new yaml config file according to your needs, refer to Configuration file reference below. You can remove the default values if wanted as k0s supports partial configs too.
Install k0s with your new config file.
sudo k0s install controller -c /etc/k0s/k0s.yaml\n
If you need to modify your existing configuration later on, you can change your config file also when k0s is running, but remember to restart k0s to apply your configuration changes.
sudo k0s stop\nsudo k0s start\n
k0sctl can deploy your configuration options at cluster creation time. Your options should be placed in the spec.k0s.config
section of the k0sctl's configuration file. See the section on how to install k0s via k0sctl and the k0sctl README for more information.
CAUTION: As many of the available options affect items deep in the stack, you should fully understand the correlation between the configuration file components and your specific environment before making any changes.
A YAML config file follows, with defaults as generated by the k0s config create
command:
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\napi:\naddress: 192.168.68.104\nexternalAddress: my-lb-address.example.com\nk0sApiPort: 9443\nport: 6443\nsans:\n- 192.168.68.104\ncontrollerManager: {}\nextensions:\nhelm:\nconcurrencyLevel: 5\ncharts: null\nrepositories: null\nstorage:\ncreate_default_storage_class: false\ntype: external_storage\ninstallConfig:\nusers:\netcdUser: etcd\nkineUser: kube-apiserver\nkonnectivityUser: konnectivity-server\nkubeAPIserverUser: kube-apiserver\nkubeSchedulerUser: kube-scheduler\nkonnectivity:\nadminPort: 8133\nagentPort: 8132\nnetwork:\ncalico: null\nclusterDomain: cluster.local\ndualStack: {}\nkubeProxy:\nmetricsBindAddress: 0.0.0.0:10249\nmode: iptables\nkuberouter:\nautoMTU: true\nhairpin: Enabled\nipMasq: false\nmetricsPort: 8080\nmtu: 0\npeerRouterASNs: \"\"\npeerRouterIPs: \"\"\nnodeLocalLoadBalancing:\nenabled: false\nenvoyProxy:\napiServerBindPort: 7443\nimage:\nimage: docker.io/envoyproxy/envoy-distroless\nversion: v1.24.1\nkonnectivityServerBindPort: 7132\ntype: EnvoyProxy\npodCIDR: 10.244.0.0/16\nprovider: kuberouter\nserviceCIDR: 10.96.0.0/12\nscheduler: {}\nstorage:\netcd:\nexternalCluster: null\npeerAddress: 192.168.68.104\ntype: etcd\ntelemetry:\nenabled: true\nfeatureGates:\n- name: feature_XXX\nenabled: true\ncomponents: [\"kubelet\", \"kube-api\", \"kube-scheduler\"]\n- name: feature_YYY\nenabled: true\n-\nname: feature_ZZZ\nenabled: false\n
"},{"location":"configuration/#spec-key-detail","title":"spec
Key Detail","text":""},{"location":"configuration/#specapi","title":"spec.api
","text":"Element Description externalAddress
The loadbalancer address (for k0s controllers running behind a loadbalancer). Configures all cluster components to connect to this address and also configures this address for use when joining new nodes to the cluster. address
Local address on which to bind an API. Also serves as one of the addresses pushed on the k0s create service certificate on the API. Defaults to first non-local address found on the node. sans
List of additional addresses to push to API servers serving the certificate. extraArgs
Map of key-values (strings) for any extra arguments to pass down to Kubernetes api-server process. port
\u00b9 Custom port for kube-api server to listen on (default: 6443) k0sApiPort
\u00b9 Custom port for k0s-api server to listen on (default: 9443) \u00b9 If port
and k0sApiPort
are used with the externalAddress
element, the loadbalancer serving at externalAddress
must listen on the same ports.
spec.storage
","text":"Element Description type
Type of the data store (valid values:etcd
or kine
). Note: Type etcd
will cause k0s to create and manage an elastic etcd cluster within the controller nodes. etcd.peerAddress
Node address used for etcd cluster peering. etcd.extraArgs
Map of key-values (strings) for any extra arguments to pass down to etcd process. kine.dataSource
kine datasource URL."},{"location":"configuration/#specnetwork","title":"spec.network
","text":"Element Description provider
Network provider (valid values: calico
, kuberouter
, or custom
). For custom
, you can push any network provider (default: kuberouter
). Be aware that it is your responsibility to configure all of the CNI-related setups, including the CNI provider itself and all necessary host levels setups (for example, CNI binaries). Note: Once you initialize the cluster with a network provider the only way to change providers is through a full cluster redeployment. podCIDR
Pod network CIDR to use in the cluster. serviceCIDR
Network CIDR to use for cluster VIP services. clusterDomain
Cluster Domain to be passed to the kubelet and the coredns configuration."},{"location":"configuration/#specnetworkcalico","title":"spec.network.calico
","text":"Element Description mode
vxlan
(default), ipip
or bird
overlay
Overlay mode: Always
(default), CrossSubnet
or Never
(requires mode=vxlan
to disable calico overlay-network). vxlanPort
The UDP port for VXLAN (default: 4789
). vxlanVNI
The virtual network ID for VXLAN (default: 4096
). mtu
MTU for overlay network (default: 0
, which causes Calico to detect optimal MTU during bootstrap). wireguard
Enable wireguard-based encryption (default: false
). Your host system must be wireguard ready (refer to the Calico documentation for details). flexVolumeDriverPath
The host path for Calicos flex-volume-driver(default: /usr/libexec/k0s/kubelet-plugins/volume/exec/nodeagent~uds
). Change this path only if the default path is unwriteable (refer to Project Calico Issue #2712 for details). Ideally, you will pair this option with a custom volumePluginDir
in the profile you use for your worker nodes. ipAutodetectionMethod
Use to force Calico to pick up the interface for pod network inter-node routing (default: \"\"
, meaning not set, so that Calico will instead use its defaults). For more information, refer to the Calico documentation. envVars
Map of key-values (strings) for any calico-node environment variable."},{"location":"configuration/#specnetworkcalicoenvvars","title":"spec.network.calico.envVars
","text":"Environment variable's value must be string, e.g.:
spec:\nnetwork:\nprovider: calico\ncalico:\nenvVars:\nTEST_BOOL_VAR: \"true\"\nTEST_INT_VAR: \"42\"\nTEST_STRING_VAR: test\n
K0s runs Calico with some predefined vars, which can be overwritten by setting new value in spec.network.calico.envVars
:
CALICO_IPV4POOL_CIDR: \"{{ spec.network.podCIDR }}\"\nCALICO_DISABLE_FILE_LOGGING: \"true\"\nFELIX_DEFAULTENDPOINTTOHOSTACTION: \"ACCEPT\"\nFELIX_LOGSEVERITYSCREEN: \"info\"\nFELIX_HEALTHENABLED: \"true\"\nFELIX_PROMETHEUSMETRICSENABLED: \"true\"\nFELIX_FEATUREDETECTOVERRIDE: \"ChecksumOffloadBroken=true\"\n
FELIX_FEATUREDETECTOVERRIDE: ChecksumOffloadBroken=true
disables VXLAN offloading because of projectcalico/calico#4727.
In SingleStack mode there are additional vars:
FELIX_IPV6SUPPORT: \"false\"\n
In DualStack mode there are additional vars:
CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\nFELIX_IPV6SUPPORT: \"true\"\nIP6: \"autodetect\"\nCALICO_IPV6POOL_CIDR: \"{{ spec.network.dualStack.IPv6podCIDR }}\"\n
"},{"location":"configuration/#specnetworkkuberouter","title":"spec.network.kuberouter
","text":"Element Description autoMTU
Autodetection of used MTU (default: true
). mtu
Override MTU setting, if autoMTU
must be set to false
). metricsPort
Kube-router metrics server port. Set to 0 to disable metrics (default: 8080
). peerRouterIPs
Comma-separated list of global peer addresses. peerRouterASNs
Comma-separated list of global peer ASNs. hairpin
Hairpin mode, supported modes Enabled
: enabled cluster wide, Allowed
: must be allowed per service using annotations, Disabled
: doesn't work at all (default: Enabled) hairpinMode
Deprecated Use hairpin
instead. If both hairpin
and hairpinMode
are defined, this is ignored. If only hairpinMode is configured explicitly activates hairpinMode (https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode). ipMasq
IP masquerade for traffic originating from the pod network, and destined outside of it (default: false) Note: Kube-router allows many networking aspects to be configured per node, service, and pod (for more information, refer to the Kube-router user guide).
"},{"location":"configuration/#specnetworkkubeproxy","title":"spec.network.kubeProxy
","text":"Element Description disabled
Disable kube-proxy altogether (default: false
). mode
Kube proxy operating mode, supported modes iptables
, ipvs
, userspace
(default: iptables
) iptables
Kube proxy iptables settings ipvs
Kube proxy ipvs settings nodePortAddresses
Kube proxy nodePortAddresses Default kube-proxy iptables settings:
iptables:\nmasqueradeAll: false\nmasqueradeBit: null\nminSyncPeriod: 0s\nsyncPeriod: 0s\n
Default kube-proxy ipvs settings:
ipvs:\nexcludeCIDRs: null\nminSyncPeriod: 0s\nscheduler: \"\"\nstrictARP: false\nsyncPeriod: 0s\ntcpFinTimeout: 0s\ntcpTimeout: 0s\nudpTimeout: 0s\n
"},{"location":"configuration/#specnetworknodelocalloadbalancing","title":"spec.network.nodeLocalLoadBalancing
","text":"Configuration options related to k0s's node-local load balancing feature.
Note: This feature is experimental! Expect instabilities and/or breaking changes.
Element Descriptionenabled
Indicates if node-local load balancing should be used to access Kubernetes API servers from worker nodes. Default: false
. type
The type of the node-local load balancer to deploy on worker nodes. Default: EnvoyProxy
. (This is the only option for now.) envoyProxy
Configuration options related to the \"EnvoyProxy\" type of load balancing."},{"location":"configuration/#specnetworknodelocalloadbalancingenvoyproxy","title":"spec.network.nodeLocalLoadBalancing.envoyProxy
","text":"Configuration options required for using Envoy as the backing implementation for node-local load balancing.
Note: This type of load balancing is not supported on ARMv7 workers.
Element Descriptionimage
The OCI image that's being used for the Envoy Pod. imagePullPolicy
The pull policy used for the Envoy Pod. Defaults to spec.images.default_pull_policy
if omitted. apiServerBindPort
Port number on which to bind the Envoy load balancer for the Kubernetes API server on a worker's loopback interface. Default: 7443
. konnectivityServerBindPort
Port number on which to bind the Envoy load balancer for the konnectivity server on a worker's loopback interface. Default: 7132
."},{"location":"configuration/#speccontrollermanager","title":"spec.controllerManager
","text":"Element Description extraArgs
Map of key-values (strings) for any extra arguments you want to pass down to the Kubernetes controller manager process."},{"location":"configuration/#specscheduler","title":"spec.scheduler
","text":"Element Description extraArgs
Map of key-values (strings) for any extra arguments you want to pass down to Kubernetes scheduler process."},{"location":"configuration/#specworkerprofiles","title":"spec.workerProfiles
","text":"Worker profiles are used to manage worker-specific configuration in a centralized manner. A ConfigMap is generated for each worker profile. Based on the --profile
argument given to the k0s worker
, the configuration in the corresponding ConfigMap is is picked up during startup.
The worker profiles are defined as an array. Each element has following properties:
Property Descriptionname
String; name to use as profile selector for the worker process values
Object; Kubelet configuration overrides, see below for details"},{"location":"configuration/#specworkerprofilesvalues-kubelet-configuration-overrides","title":"spec.workerProfiles[].values
(Kubelet configuration overrides)","text":"The Kubelet configuration overrides of a profile override the defaults defined by k0s.
Note that there are several fields that cannot be overridden:
clusterDNS
clusterDomain
apiVersion
kind
staticPodURL
spec.featureGates
","text":"Available components are:
If components
are omitted, propagates to all kube components.
Modifies extraArgs.
"},{"location":"configuration/#example","title":"Example","text":"spec:\nfeatureGates:\n- name: feature-gate-0\nenabled: true\ncomponents: [\"kube-apiserver\", \"kube-controller-manager\", \"kubelet\", \"kube-scheduler\"]\n- name: feature-gate-1\nenabled: true\n- name: feature-gate-2\nenabled: false\n
"},{"location":"configuration/#kubelet-feature-gates-example","title":"Kubelet feature gates example","text":"The below is an example of a k0s config with feature gates enabled:
spec:\nfeatureGates:\n- name: DevicePlugins\nenabled: true\ncomponents: [\"kubelet\"]\n- name: Accelerators\nenabled: true\ncomponents: [\"kubelet\"]\n- name: AllowExtTrafficLocalEndpoints\nenabled: false\n
"},{"location":"configuration/#configuration-examples","title":"Configuration examples","text":""},{"location":"configuration/#custom-volumeplugindir","title":"Custom volumePluginDir","text":"spec:\nworkerProfiles:\n- name: custom-pluginDir\nvalues:\nvolumePluginDir: /var/libexec/k0s/kubelet-plugins/volume/exec\n
"},{"location":"configuration/#eviction-policy","title":"Eviction Policy","text":"spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nevictionHard:\nmemory.available: \"500Mi\"\nnodefs.available: \"1Gi\"\nimagefs.available: \"100Gi\"\nevictionMinimumReclaim:\nmemory.available: \"0Mi\"\nnodefs.available: \"500Mi\"\nimagefs.available: \"2Gi\"\n
"},{"location":"configuration/#unsafe-sysctls","title":"Unsafe Sysctls","text":"spec:\nworkerProfiles:\n- name: custom-eviction\nvalues:\nallowedUnsafeSysctls:\n- fs.inotify.max_user_instances\n
"},{"location":"configuration/#specimages","title":"spec.images
","text":"Nodes under the images
key all have the same basic structure:
spec:\nimages:\ncoredns:\nimage: quay.io/coredns/coredns\nversion: v1.7.0\n
If you want the list of default images and their versions to be included, use k0s config create --include-images
.
spec.images.konnectivity
spec.images.metricsserver
spec.images.kubeproxy
spec.images.coredns
spec.images.pause
spec.images.calico.cni
spec.images.calico.flexvolume
spec.images.calico.node
spec.images.calico.kubecontrollers
spec.images.kuberouter.cni
spec.images.kuberouter.cniInstaller
spec.images.repository
\u00b9\u00b9 If spec.images.repository
is set and not empty, every image will be pulled from images.repository
If spec.images.default_pull_policy
is set and not empty, it will be used as a pull policy for each bundled image.
images:\nrepository: \"my.own.repo\"\nkonnectivity:\nimage: calico/kube-controllers\nversion: v3.16.2\nmetricsserver:\nimage: registry.k8s.io/metrics-server/metrics-server\nversion: v0.6.4\n
In the runtime the image names are calculated as my.own.repo/calico/kube-controllers:v3.16.2
and my.own.repo/metrics-server/metrics-server:v0.6.4
. This only affects the the imgages pull location, and thus omitting an image specification here will not disable component deployment.
spec.extensions.helm
","text":"spec.extensions.helm
is the config file key in which you configure the list of Helm repositories and charts to deploy during cluster bootstrap (for more information, refer to Helm Charts).
spec.extensions.storage
","text":"spec.extensions.storage
controls bundled storage provider. The default value external
makes no storage deployed.
To enable embedded host-local storage provider use the following configuration:
spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
"},{"location":"configuration/#speckonnectivity","title":"spec.konnectivity
","text":"The spec.konnectivity
key is the config file key in which you configure Konnectivity-related settings.
agentPort
agent port to listen on (default 8132)adminPort
admin port to listen on (default 8133)spec.telemetry
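For example, to state the defaults explicitly in the configuration file:

spec:
  konnectivity:
    agentPort: 8132
    adminPort: 8133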
","text":"To improve the end-user experience k0s is configured by defaul to collect telemetry data from clusters and send it to the k0s development team. To disable the telemetry function, change the enabled
setting to false
.
The telemetry interval is ten minutes.
spec:\ntelemetry:\nenabled: true\n
"},{"location":"configuration/#disabling-controller-components","title":"Disabling controller components","text":"k0s allows to completely disable some of the system components. This allows users to build a minimal Kubernetes control plane and use what ever components they need to fulfill their need for the control plane. Disabling the system components happens through a command line flag for the controller process:
--disable-components strings disable components (valid items: api-config,autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n
Note: As of k0s 1.26, the kubelet-config component has been replaced by the worker-config component. k0s will issue a warning when the old component name is being used. It is scheduled for removal in k0s 1.27. Please update to the new component name.
If you use k0sctl, just add the flag when installing the cluster for the first controller at spec.hosts.installFlags
in the config file like e.g.:
spec:\nhosts:\n- role: controller\ninstallFlags:\n- --disable-components=metrics-server\n
As seen from the component list, the only always-on component is the Kubernetes api-server, without that k0s serves no purpose.
"},{"location":"conformance-testing/","title":"Kubernetes conformance testing for k0s","text":"We run the conformance testing for the last RC build for a release. Follow the instructions as the conformance testing repository.
In a nutshell, you need to:
sonobuoy run --mode=certified-conformance
See runtime.
"},{"location":"custom-ca/","title":"Install using custom CA certificates and SA key pair","text":"k0s generates all needed certificates automatically in the <data-dir>/pki
directory (/var/lib/k0s/pki
, by default).
But sometimes there is a need to have the CA certificates and SA key pair in advance. To make it work, just put files to the <data-dir>/pki
and <data-dir>/pki/etcd
:
export LIFETIME=365\nmkdir -p /var/lib/k0s/pki/etcd\ncd /var/lib/k0s/pki\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days $LIFETIME -out ca.crt -subj \"/CN=Custom CA\"\nopenssl genrsa -out sa.key 2048\nopenssl rsa -in sa.key -outform PEM -pubout -out sa.pub\ncd ./etcd\nopenssl genrsa -out ca.key 2048\nopenssl req -x509 -new -nodes -key ca.key -sha256 -days $LIFETIME -out ca.crt -subj \"/CN=Custom CA\"\n
Then you can install k0s as usual.
"},{"location":"custom-ca/#pre-generated-tokens","title":"Pre-generated tokens","text":"It's possible to get join in advance without having a running cluster.
k0s token pre-shared --role worker --cert /var/lib/k0s/pki/ca.crt --url https://<controller-ip>:6443/\n
The command above generates a join token and a Secret. A Secret should be deployed to the cluster to authorize the token. For example, you can put the Secret under the manifest directory and it will be deployed automatically.
"},{"location":"custom-cri-runtime/","title":"Custom cri runtime","text":"See runtime.
"},{"location":"dockershim/","title":"Dockershim deprecation - what does it mean for K0s?","text":"Back in December 2020, Kubernetes have announced the deprecation of the dockershim from version 1.24 onwards. As a consequence, k0s 1.24 and above don't support the dockershim as well.
"},{"location":"dockershim/#what-is-dockershim-and-why-was-it-deprecated","title":"What is dockershim and why was it deprecated?","text":"The dockershim is a transparent library that intercepts API calls to the kubernetes API and handles their operation in the Docker API. Early versions of Kubernetes used this shim in order to allow containers to run over docker. Later versions of Kubernetes started creating containers via the CRI (Container Runtime Interface). Since CRI has become the de-facto default runtime for Kubernetes, maintaining the dockershim turned into a heavy burden for Kubernetes maintainers, and so the decision to deprecate the built-in dockershim support came into being.
"},{"location":"dockershim/#so-whats-going-to-happen-to-dockershim","title":"So what's going to happen to dockershim?","text":"Dockershim is not gone. It's only changed ownership. Mirantis has agreed to maintain dockershim (now called cri-dockerd). See: The Future of Dockershim is cri-dockerd.
From Kubernetes version 1.24 you will have the built-in possibility to run containers via CRI, but if you want to continue using docker, you are free to do so, using cri-dockerd.
In order to continue to use the Docker engine with Kubernetes v1.24+, you will have to migrated all worker nodes to use cri-dockerd.
"},{"location":"dockershim/#migrating-to-cri-dockerd","title":"Migrating to CRI-Dockerd","text":"This migration guide assumes that you've been running k0s with docker on version 1.23 and below.
The following steps will need to be done on ALL k0s' worker nodes, or single-node controllers. Basically any node that runs containers will need to be migrated using the process detailed below.
Please note that there are currently some pitfalls around container metrics when using CRI-dockerd.
"},{"location":"dockershim/#cordon-and-drain-the-node","title":"Cordon and drain the node","text":"Get a list of all nodes (k0s is still version 1.23, which already includes the docker-shim):
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 52m v1.28.1+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready <none> 12s v1.28.1+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
cordon and drain the nodes (migrate one by one):
sudo k0s kubectl cordon ip-10-0-62-250.eu-west-1.compute.internal \nsudo k0s kubectl drain ip-10-0-62-250.eu-west-1.compute.internal --ignore-daemonsets\n
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 56m v1.28.1+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready,SchedulingDisabled <none> 3m40s v1.28.1+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
Stop k0s on the node:
sudo k0s stop\n
"},{"location":"dockershim/#installing-cri-dockerd","title":"Installing CRI-Dockerd","text":"Download the Latest cri-dockerd deb package:
cd /tmp\n\n# Get the deb file name for ubuntu-jammy\nOS=\"ubuntu-jammy\"\nPKG=$(curl -s https://api.github.com/repos/Mirantis/cri-dockerd/releases/latest | grep ${OS} | grep http | cut -d '\"' -f 4)\n\nwget ${PKG} -O cri-dockerd-latest.deb\n\nsudo dpkg -i cri-dockerd-latest.deb\n\nSelecting previously unselected package cri-dockerd.\n(Reading database ... 164618 files and directories currently installed.)\nPreparing to unpack cri-dockerd-latest.deb ...\nUnpacking cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nSetting up cri-dockerd (0.2.1~3-0~ubuntu-jammy) ...\nCreated symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service \u2192 /lib/systemd/system/cri-docker.service.\nCreated symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket \u2192 /lib/systemd/system/cri-docker.socket.\n
Verify the correct version:
which cri-dockerd\n/usr/bin/cri-dockerd\n\ncri-dockerd --version\ncri-dockerd 0.2.1 (HEAD)\n
Make sure dockershim is started:
sudo systemctl status cri-docker.service\n\u25cf cri-docker.service - CRI Interface for Docker Application Container Engine\n Loaded: loaded (/lib/systemd/system/cri-docker.service; enabled; vendor preset: enabled)\nActive: active (running) since Wed 2022-05-25 14:27:31 UTC; 1min 23s ago\nTriggeredBy: \u25cf cri-docker.socket\n Docs: https://docs.mirantis.com\n Main PID: 1404151 (cri-dockerd)\nTasks: 9\nMemory: 15.3M\n CGroup: /system.slice/cri-docker.service\n \u2514\u25001404151 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=\n
"},{"location":"dockershim/#configure-k0s-to-use-dockershim","title":"Configure K0s to use dockershim","text":"Replace docker socket in the systemd file for cri-dockerd (the step below should be run AFTER upgrading k0s to version 1.24):
sudo sed -i -e 's_--cri-socket=docker:unix:///var/run/docker.sock_--cri-socket docker:unix:///var/run/cri-dockerd.sock_' /etc/systemd/system/k0sworker.service\nsudo systemctl daemon-reload\n
"},{"location":"dockershim/#start-k0s-with-cri-dockerd","title":"Start k0s with cri-dockerd","text":"sudo k0s start\n
Verify the running pods via docker ps
:
docker ps --format \"table {{.ID}}\\t{{.Names}}\\t{{.State}}\\t{{.Status}}\\t{{.Image}}\"\n\nCONTAINER ID NAMES STATE STATUS IMAGE\n1b9b4624ddfd k8s_konnectivity-agent_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_1 running Up 51 minutes quay.io/k0sproject/apiserver-network-proxy-agent\n414758a8a951 k8s_kube-router_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_1 running Up 51 minutes 3a67679337a5\nb81960bb304c k8s_kube-proxy_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_1 running Up 51 minutes quay.io/k0sproject/kube-proxy\nfb888cbc5ae0 k8s_POD_kube-router-qlkgg_kube-system_9a1b67bf-5347-4acd-98ac-f9a67f2db730_0 running Up 51 minutes registry.k8s.io/pause:3.1\n382d0a938c9d k8s_POD_konnectivity-agent-5jpd7_kube-system_1b3101ea-baeb-4a22-99a2-088d7ca5be85_0 running Up 51 minutes registry.k8s.io/pause:3.1\n72d4a47b5609 k8s_POD_kube-proxy-tv95n_kube-system_164dc9f8-f47c-4f6c-acb7-ede5dbcd63cd_0 running Up 51 minutes registry.k8s.io/pause:3.1\n
On the controller, you'll be able to see the worker started with the new docker container runtime:
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 117m v1.28.1+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready,SchedulingDisabled <none> 64m v1.28.1+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
"},{"location":"dockershim/#uncordon-the-node","title":"Uncordon the Node","text":"sudo k0s kubectl uncordon ip-10-0-62-250.eu-west-1.compute.internal\n\nnode/ip-10-0-62-250.eu-west-1.compute.internal uncordoned\n
You should now see the node Ready for scheduling with the docker Runtime:
sudo k0s kubectl get nodes -o wide\n\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nip-10-0-49-188.eu-west-1.compute.internal Ready control-plane 119m v1.28.1+k0s 10.0.49.188 <none> Ubuntu 20.04.4 LTS 5.13.0-1022-aws docker://20.10.16\nip-10-0-62-250.eu-west-1.compute.internal Ready <none> 66m v1.28.1+k0s 10.0.62.250 <none> Ubuntu 20.04.4 LTS 5.13.0-1017-aws docker://20.10.16\n
"},{"location":"dual-stack/","title":"Dual-stack Networking","text":"Note: Dual stack networking setup requires that you configure Calico or a custom CNI as the CNI provider.
Use the following k0s.yaml
as a template to enable dual-stack networking. This configuration will set up bundled calico CNI, enable feature gates for the Kubernetes components, and set up kubernetes-controller-manager
.
spec:\nnetwork:\npodCIDR: \"10.244.0.0/16\"\nserviceCIDR: \"10.96.0.0/12\"\nprovider: calico\ncalico:\nmode: \"bird\"\ndualStack:\nenabled: true\nIPv6podCIDR: \"fd00::/108\"\nIPv6serviceCIDR: \"fd01::/108\"\n
"},{"location":"dual-stack/#cni-settings-calico","title":"CNI Settings: Calico","text":"For cross-pod connectivity, use BIRD for the backend. Calico does not support tunneling for the IPv6, and thus VXLAN and IPIP backends do not work.
Note: In any Calico mode other than cross-pod, the pods can only reach pods on the same node.
"},{"location":"dual-stack/#cni-settings-external-cni","title":"CNI Settings: External CNI","text":"Although the k0s.yaml
dualStack section enables all of the neccessary feature gates for the Kubernetes components, for use with an external CNI it must be set up to support IPv6.
k0s comes with the option to enable dynamic configuration for cluster level components. This covers all the components other than etcd (or sqlite) and the Kubernetes api-server. This option enables k0s configuration directly via Kubernetes API as opposed to using a configuration file for all cluster configuration.
This feature has to be enabled for every controller in the cluster using the --enable-dynamic-config
flag in k0s controller
or k0s install controller
commands. Having both types of controllers in the same cluster will cause a conflict.
The existing and enabled-by-default method is what we call static configuration. That's the way where the k0s process reads the config from the given YAML file (or uses the default config if no config is given by user) and configures every component accordingly. This means that for any configuration change the cluster admin has to restart all controllers on the cluster and have matching configs on each controller node.
In dynamic configuration mode the first controller to boot up when the cluster is created will use the given config YAML as a bootstrap configuration and stores it in the Kubernetes API. All the other controllers will find the config existing on the API and will use it as the source-of-truth for configuring all the components except for etcd and kube-apiserver. After the initial cluster bootstrap the source of truth for all controllers is the configuration object in the Kubernetes API.
"},{"location":"dynamic-configuration/#cluster-configuration-vs-controller-node-configuration","title":"Cluster configuration vs. controller node configuration","text":"In the k0s configuration options there are some options that are cluster-wide and some that are specific to each controller node in the cluster. The following list outlines which options are controller node specific and have to be configured only via the local file:
spec.api
- these options configure how the local Kubernetes API server is setupspec.storage
- these options configure how the local storage (etcd or sqlite) is setupIn case of HA control plane, all the controllers will need this part of the configuration as otherwise they will not be able to get the storage and Kubernetes API server running.
"},{"location":"dynamic-configuration/#configuration-location","title":"Configuration location","text":"The cluster wide configuration is stored in the Kubernetes API as a custom resource called clusterconfig
. There's currently only one instance named k0s
. You can edit the configuration with what ever means possible, for example with:
k0s config edit\n
This will open the configuration object for editing in your system's default editor.
"},{"location":"dynamic-configuration/#configuration-reconciliation","title":"Configuration reconciliation","text":"The dynamic configuration uses the typical operator pattern for operation. k0s controller will detect when the object changes and will reconcile the configuration changes to be reflected to how different components are configured. So say you want to change the MTU setting for kube-router CNI networking you'd change the config to contain e.g.:
kuberouter:\nmtu: 1350\nautoMTU: false\n
This will change the kube-router related configmap and thus make kube-router to use different MTU settings for new pods.
"},{"location":"dynamic-configuration/#configuration-options","title":"Configuration options","text":"The configuration object is a 1-to-1 mapping with the existing configuration YAML. All the configuration options EXCEPT options under spec.api
and spec.storage
are dynamically reconciled.
As with any Kubernetes cluster there are certain things that just cannot be changed on-the-fly, this is the list of non-changeable options:
network.podCIDR
network.serviceCIDR
network.provider
The dynamic configuration reconciler operator will write status events for all the changes it detects. To see all dynamic config related events, use:
k0s config status\n
LAST SEEN TYPE REASON OBJECT MESSAGE\n64s Warning FailedReconciling clusterconfig/k0s failed to validate config: [invalid pod CIDR invalid ip address]\n59s Normal SuccessfulReconcile clusterconfig/k0s Succesfully reconciler cluster config\n69s Warning FailedReconciling clusterconfig/k0s cannot change CNI provider from kuberouter to calico\n
"},{"location":"environment-variables/","title":"Environment variables","text":"k0s install
does not support environment variables.
Setting environment variables for components used by k0s depends on the used init system. The environment variables set in k0scontroller
or k0sworker
service will be inherited by k0s components, such as etcd
, containerd
, konnectivity
, etc.
Component specific environment variables can be set in k0scontroller
or k0sworker
service. For example: for CONTAINERD_HTTPS_PROXY
, the prefix CONTAINERD_
will be stripped and converted to HTTPS_PROXY
in the containerd
process.
For those components having env prefix convention such as ETCD_xxx
, they are handled specially, i.e. the prefix will not be stripped. For example, ETCD_MAX_WALS
will still be ETCD_MAX_WALS
in etcd process.
The proxy envs HTTP_PROXY
, HTTPS_PROXY
, NO_PROXY
are always overridden by component specific environment variables, so ETCD_HTTPS_PROXY
will still be converted to HTTPS_PROXY
in etcd process.
Create a drop-in directory and add config file with a desired environment variable:
mkdir -p /etc/systemd/system/k0scontroller.service.d\ntee -a /etc/systemd/system/k0scontroller.service.d/http-proxy.conf <<EOT\n[Service]\nEnvironment=HTTP_PROXY=192.168.33.10:3128\nEOT\n
"},{"location":"environment-variables/#openrc","title":"OpenRC","text":"Export desired environment variable overriding service configuration in /etc/conf.d directory:
echo 'export HTTP_PROXY=\"192.168.33.10:3128\"' > /etc/conf.d/k0scontroller\n
"},{"location":"experimental-windows/","title":"Run k0s worker nodes in Windows","text":"IMPORTANT: Windows support for k0s is under active development and must be considered experimental.
"},{"location":"experimental-windows/#prerequisites","title":"Prerequisites","text":"The cluster must be running at least one worker node and control plane on Linux. You can use Windows to run additional worker nodes.
"},{"location":"experimental-windows/#run-k0s","title":"Run k0s","text":"Note: The k0s.exe supervises kubelet.exe and kube-proxy.exe.
During the first run, the calico install script is created as C:\\bootstrap.ps1
. This bootstrap script downloads the Calico binaries, builds the pause container and sets up the vSwitch settings.
Install Mirantis Container Runtime on the Windows node(s), as it is required for the initial Calico setup.
k0s worker --cri-socket=docker:tcp://127.0.0.1:2375 --cidr-range=<cidr_range> --cluster-dns=<clusterdns> --api-server=<k0s api> <token>\n
You must initiate the Cluster control with the correct config.
"},{"location":"experimental-windows/#configuration","title":"Configuration","text":""},{"location":"experimental-windows/#strict-affinity","title":"Strict-affinity","text":"You must enable strict affinity to run the windows node.
If the spec.network.calico.withWindowsNodes
field is set to true
(it is set to false
by default) the additional calico related manifest /var/lib/k0s/manifests/calico/calico-IPAMConfig-ipamconfig.yaml
is created with the following values:
---\napiVersion: crd.projectcalico.org/v1\nkind: IPAMConfig\nmetadata:\nname: default\nspec:\nstrictAffinity: true\n
Alternately, you can manually execute calicoctl:
calicoctl ipam configure --strictaffinity=true\n
"},{"location":"experimental-windows/#network-connectivity-in-aws","title":"Network connectivity in AWS","text":"Disable the Change Source/Dest. Check
option for the network interface attached to your EC2 instance. In AWS, the console option for the network interface is in the Actions menu.
k0s offers the following CLI arguments in lieu of a formal means for passing cluster settings from the control plane to the worker:
kubectl run win --image=hello-world:nanoserver --command=true -i --attach=true -- cmd.exe\n
"},{"location":"experimental-windows/#manifest-for-pod-with-iis-web-server","title":"Manifest for pod with IIS web-server","text":"apiVersion: v1\nkind: Pod\nmetadata:\nname: iis\nspec:\ncontainers:\n- name: iis\nimage: mcr.microsoft.com/windows/servercore/iis\nimagePullPolicy: IfNotPresent\n
"},{"location":"extensions/","title":"Cluster extensions","text":"k0s allows users to use extensions to extend cluster functionality.
At the moment, the only supported extension type is Helm-based charts.
The default configuration has no extensions.
"},{"location":"extensions/#helm-based-extensions","title":"Helm based extensions","text":""},{"location":"extensions/#configuration-example","title":"Configuration example","text":"helm:\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"11.16.8\"\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\nvalues: \"\"\nnamespace: default\n
By using the configuration above, the cluster would:
prometheus-community/prometheus
chart of the specified version to the default
namespace.The chart installation is implemented by using CRD helm.k0sproject.io/Chart
. For every given helm extension the cluster creates a Chart CRD instance. The cluster has a controller which monitors for the Chart CRDs, supporting the following operations:
For security reasons, the cluster operates only on Chart CRDs instantiated in the kube-system
namespace, however, the target namespace could be any.
apiVersion: helm.k0sproject.io/v1beta1\nkind: Chart\nmetadata:\ncreationTimestamp: \"2020-11-10T14:17:53Z\"\ngeneration: 2\nlabels:\nk0s.k0sproject.io/stack: helm\nname: k0s-addon-chart-test-addon\nnamespace: kube-system\nresourceVersion: \"627\"\nselfLink: /apis/helm.k0sproject.io/v1beta1/namespaces/kube-system/charts/k0s-addon-chart-test-addon\nuid: ebe59ed4-1ff8-4d41-8e33-005b183651ed\nspec:\nchartName: prometheus-community/prometheus\nnamespace: default\nvalues: |\nstorageSpec:\nemptyDir:\nmedium: Memory\nversion: 11.16.8\nstatus:\nappVersion: 2.21.0\nnamespace: default\nreleaseName: prometheus-1605017878\nrevision: 2\nupdated: 2020-11-10 14:18:08.235656 +0000 UTC m=+41.871656901\nversion: 11.16.8\n
The Chart.spec
defines the chart information.
The Chart.status
keeps the information about the last operation performed by the operator.
k0s is packaged as a single binary, which includes all the needed components. All the binaries are statically linked which means that in typical use cases there's an absolute minimum of external runtime dependencies.
However, depending on the node role and cluster configuration, some of the underlying components may have specific dependencies, like OS level tools, packages and libraries. This page aims to provide a comprehensive overview.
The following command checks for known requirements on a host (currently only available on Linux):
k0s sysinfo\n
"},{"location":"external-runtime-deps/#a-unique-machine-id-for-multi-node-setups","title":"A unique machine ID for multi-node setups","text":"Whenever k0s is run in a multi-node setup (i.e. the --single
command line flag isn't used), k0s requires a machine ID: a unique host identifier that is somewhat stable across reboots. For Linux, this ID is read from the files /var/lib/dbus/machine-id
or /etc/machine-id
. For Windows, it's taken from the registry key HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Cryptography\\MachineGuid
. If neither of the OS specific sources yield a result, k0s will fallback to use a machine ID based on the hostname.
When running k0s on top of virtualized or containerized environments, you need to ensure that hosts get their own unique IDs, even if they have been created from the same image.
"},{"location":"external-runtime-deps/#linux-specific","title":"Linux specific","text":""},{"location":"external-runtime-deps/#linux-kernel-configuration","title":"Linux kernel configuration","text":"Needless to say, as k0s operates Kubernetes worker nodes, there's a certain number of needed Linux kernel modules and configurations that we need in the system. This basically stems from the need to run both containers and also be able to set up networking for the containers.
The needed kernel configuration items are listed below. All of them are available in Kernel versions 4.3 and above. If running on older kernels, check if the distro in use has backported some features; nevertheless, it might meet the requirements. k0s will check the Linux kernel release as part of its pre-flight checks and issue a warning if it's below 3.10.
The list covers ONLY the k0s/kubernetes components\u2019 needs on worker nodes. Your own workloads may require more.
CONFIG_CGROUPS
: Control Group supportCONFIG_CGROUP_FREEZER
: Freezer cgroup subsystemCONFIG_CGROUP_PIDS
: PIDs cgroup subsystem kubernetes/kubeadm#2335 (comment)CONFIG_CGROUP_DEVICE
: Device controller for cgroupsCONFIG_CPUSETS
: Cpuset supportCONFIG_CGROUP_CPUACCT
: Simple CPU accounting cgroup subsystemCONFIG_MEMCG
: Memory Resource Controller for Control GroupsCONFIG_CGROUP_HUGETLB
: HugeTLB Resource Controller for Control Groups kubernetes/kubeadm#2335 (comment)CONFIG_CGROUP_SCHED
: Group CPU schedulerCONFIG_FAIR_GROUP_SCHED
: Group scheduling for SCHED_OTHER kubernetes/kubeadm#2335 (comment)CONFIG_CFS_BANDWIDTH
: CPU bandwidth provisioning for FAIR_GROUP_SCHED Required if CPU CFS quota enforcement is enabled for containers that specify CPU limits (--cpu-cfs-quota
).CONFIG_BLK_CGROUP
: Block IO controller kubernetes/kubernetes#92287 (comment)CONFIG_NAMESPACES
: Namespaces supportCONFIG_UTS_NS
: UTS namespaceCONFIG_IPC_NS
: IPC namespaceCONFIG_PID_NS
: PID namespaceCONFIG_NET_NS
: Network namespaceCONFIG_NET
: Networking supportCONFIG_INET
: TCP/IP networkingCONFIG_NETFILTER
: Network packet filtering framework (Netfilter)CONFIG_NETFILTER_ADVANCED
: Advanced netfilter configurationCONFIG_NETFILTER_XTABLES
: Netfilter Xtables supportCONFIG_NETFILTER_XT_TARGET_REDIRECT
: REDIRECT target supportCONFIG_NETFILTER_XT_MATCH_COMMENT
: \"comment\" match supportCONFIG_EXT4_FS
: The Extended 4 (ext4) filesystemCONFIG_PROC_FS
: /proc file system supportNote: As part of its pre-flight checks, k0s will try to inspect and validate the kernel configuration. In order for that to succeed, the configuration needs to be accessible at runtime. There are some typical places that k0s will check. A bullet-proof way to ensure the accessibility is to enable CONFIG_IKCONFIG_PROC
, and, if enabled as a module, to load the configs
module: modprobe configs
.
Both cgroup v1 and cgroup v2 are supported.
Required cgroup controllers:
Optional cgroup controllers:
There are very few external tools that are needed or used.
"},{"location":"external-runtime-deps/#mountumount","title":"mount/umount","text":"When setting up pods, kubelet will call mount
binary on the host. Similarly when destroying pods it will call umount
. mount
and umount
are only needed on worker nodes where kubelet runs.
There are a few external tools that may be needed or used under specific circumstances:
"},{"location":"external-runtime-deps/#containerd-and-apparmor","title":"containerd and AppArmor","text":"In order to use containerd in conjunction with AppArmor, it must be enabled in the kernel and the /sbin/apparmor_parser
executable must be installed on the host, otherwise containerd will disable AppArmor support.
iptables may be executed to detect if there are any existing iptables rules and if those are in legacy of nft mode. If iptables is not found, k0s will assume that there are no pre-existing iptables rules.
"},{"location":"external-runtime-deps/#useradd-adduser","title":"useradd / adduser","text":"During k0s install
the external tool useradd
will be used on the controllers to create system user accounts for k0s. If this does exist it will fall-back to busybox's adduser
.
k0s reset
will execute either userdel
or deluser
to clean up system user accounts.
On k0s worker modprobe
will be executed to load missing kernel modules if they are not detected.
External /usr/bin/id
will be executed as a fallback if local user lookup fails, in case NSS is used.
iptables
Required for worker nodes. Resolved by @ncopa in #1046 by adding iptables
and friends to k0s's embedded binaries.find
, du
and nice
Required for worker nodes. Resolved upstream by @ncopa in kubernetes/kubernetes#96115, contained in Kubernetes 1.21.8 (5b13c8f68d4) and 1.22.0 (d45ba645a8f).TBD.
"},{"location":"helm-charts/","title":"Helm Charts","text":"Defining your extensions as Helm charts is one of two methods you can use to run k0s with your preferred extensions (the other being through the use of Manifest Deployer).
k0s supports two methods for deploying applications using Helm charts:
k0s.yaml
. This method does not require a separate install of helm
tool and the charts automatically deploy at the k0s bootstrap phase.Adding Helm charts into the k0s configuration file gives you a declarative way in which to configure the cluster. k0s controller manages the setup of Helm charts that are defined as extensions in the k0s configuration file.
"},{"location":"helm-charts/#wait-for-install","title":"Wait for install","text":"Each chart is proccesed the same way CLI tool does with following options:
--wait
--wait-for-jobs
--timeout 10m
It is possible to customize timeout by using .Timeout
field.
In the example, Prometheus is configured from \"stable\" Helms chart repository. Add the following to k0s.yaml
and restart k0s, after which Prometheus should start automatically with k0s.
spec:\nextensions:\nhelm:\nconcurrencyLevel: 5\nrepositories:\n- name: stable\nurl: https://charts.helm.sh/stable\n- name: prometheus-community\nurl: https://prometheus-community.github.io/helm-charts\n- name: helm-repo-with-auth\nurl: https://can-be-your-own-gitlab-ce-instance.org/api/v4/projects/PROJECTID/packages/helm/main\nusername: access-token-name-as-username\npassword: access-token-value-as-password\ncharts:\n- name: prometheus-stack\nchartname: prometheus-community/prometheus\nversion: \"14.6.1\"\ntimeout: 20m\norder: 1\nvalues: |\nalertmanager:\npersistentVolume:\nenabled: false\nserver:\npersistentVolume:\nenabled: false\nnamespace: default\n# We don't need to specify the repo in the repositories section for OCI charts\n- name: oci-chart\nchartname: oci://registry:8080/chart\nversion: \"0.0.1\"\norder: 2\nvalues: \"\"\nnamespace: default\n# Other way is to use local tgz file with chart\n# the file must exist on all controller nodes\n- name: tgz-chart\nchartname: /tmp/chart.tgz\nversion: \"0.0.1\"\norder: 2 values: \"\"\nnamespace: default\n
Example extensions that you can use with Helm charts include:
Running k0s controller with --debug=true
enables helm debug logging.
You can create high availability for the control plane by distributing the control plane across multiple nodes and installing a load balancer on top. Etcd can be colocated with the controller nodes (default in k0s) to achieve highly available datastore at the same time.
Note: In this context even 2 node controlplane is considered HA even though it's not really HA from etcd point of view. The same requirement for LB still applies.
"},{"location":"high-availability/#network-considerations","title":"Network considerations","text":"You should plan to allocate the control plane nodes into different zones. This will avoid failures in case one zone fails.
For etcd high availability it's recommended to configure 3 or 5 controller nodes. For more information, refer to the etcd documentation.
"},{"location":"high-availability/#load-balancer","title":"Load Balancer","text":"Control plane high availability requires a tcp load balancer, which acts as a single point of contact to access the controllers. The load balancer needs to allow and route traffic to each controller through the following ports:
The load balancer can be implemented in many different ways and k0s doesn't have any additional requirements. You can use for example HAProxy, NGINX or your cloud provider's load balancer.
"},{"location":"high-availability/#example-configuration-haproxy","title":"Example configuration: HAProxy","text":"Add the following lines to the end of the haproxy.cfg:
frontend kubeAPI\n bind :6443\n mode tcp\n default_backend kubeAPI_backend\nfrontend konnectivity\n bind :8132\n mode tcp\n default_backend konnectivity_backend\nfrontend controllerJoinAPI\n bind :9443\n mode tcp\n default_backend controllerJoinAPI_backend\n\nbackend kubeAPI_backend\n mode tcp\n server k0s-controller1 <ip-address1>:6443 check check-ssl verify none\n server k0s-controller2 <ip-address2>:6443 check check-ssl verify none\n server k0s-controller3 <ip-address3>:6443 check check-ssl verify none\nbackend konnectivity_backend\n mode tcp\n server k0s-controller1 <ip-address1>:8132 check check-ssl verify none\n server k0s-controller2 <ip-address2>:8132 check check-ssl verify none\n server k0s-controller3 <ip-address3>:8132 check check-ssl verify none\nbackend controllerJoinAPI_backend\n mode tcp\n server k0s-controller1 <ip-address1>:9443 check check-ssl verify none\n server k0s-controller2 <ip-address2>:9443 check check-ssl verify none\n server k0s-controller3 <ip-address3>:9443 check check-ssl verify none\n\nlisten stats\n bind *:9000\n mode http\n stats enable\n stats uri /\n
The last block \"listen stats\" is optional, but can be helpful. It enables HAProxy statistics with a separate dashboard to monitor for example the health of each backend server. You can access it using a web browser:
http://<ip-addr>:9000\n
Restart HAProxy to apply the configuration changes.
"},{"location":"high-availability/#k0s-configuration","title":"k0s configuration","text":"First and foremost, all controllers should utilize the same CA certificates and SA key pair:
/var/lib/k0s/pki/ca.key\n/var/lib/k0s/pki/ca.crt\n/var/lib/k0s/pki/sa.key\n/var/lib/k0s/pki/sa.pub\n/var/lib/k0s/pki/etcd/ca.key\n/var/lib/k0s/pki/etcd/ca.crt\n
To generate these certificates, you have two options: either generate them manually using the instructions provided here and then share it across controller nodes, or utilize k0sctl for automated generation and sharing.
The second important aspect is: the load balancer address must be configured to k0s either by using k0s.yaml
or by using k0sctl to automatically deploy all controllers with the same configuration:
Note to update your load balancer's public ip address into two places.
spec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
"},{"location":"high-availability/#configuration-using-k0sctlyaml-for-k0sctl","title":"Configuration using k0sctl.yaml (for k0sctl)","text":"Add the following lines to the end of the k0sctl.yaml. Note to update your load balancer's public ip address into two places.
k0s:\nconfig:\nspec:\napi:\nexternalAddress: <load balancer public ip address>\nsans:\n- <load balancer public ip address>\n
For greater detail about k0s configuration, refer to the Full configuration file reference.
"},{"location":"install/","title":"Quick Start Guide","text":"On completion of the Quick Start you will have a full Kubernetes cluster with a single node that includes both the controller and the worker. Such a setup is ideal for environments that do not require high-availability and multiple nodes.
"},{"location":"install/#prerequisites","title":"Prerequisites","text":"Note: Before proceeding, make sure to review the System Requirements.
Though the Quick Start material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.
"},{"location":"install/#install-k0s","title":"Install k0s","text":"Download k0s
Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.
curl -sSLf https://get.k0s.sh | sudo sh\n
Install k0s as a service
The k0s install
sub-command installs k0s as a system service on the local host that is running one of the supported init systems: Systemd or OpenRC. You can execute the install for workers, controllers or single node (controller+worker) instances.
Run the following command to install a single node k0s that includes the controller and worker functions with the default configuration:
sudo k0s install controller --single\n
The k0s install controller
sub-command accepts the same flags and parameters as the k0s controller
. Refer to manual install for a custom config file example.
It is possible to set environment variables with the install command:
sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\n
The system service can be reinstalled with the --force
flag:
sudo k0s install controller --single --force\nsudo systemctl daemon-reload\n
Start k0s as a service
To start the k0s service, run:
sudo k0s start\n
The k0s service will start automatically after the node restart.
A minute or two typically passes before the node is ready to deploy applications.
Check service, logs and k0s status
To get general information about your k0s instance's status, run:
$ sudo k0s status\nVersion: v1.28.1+k0s.0\nProcess ID: 436\nRole: controller\nWorkloads: true\nInit System: linux-systemd\n
Access your cluster using kubectl
Note: k0s includes the Kubernetes command-line tool kubectl.
Use kubectl to deploy your application or to check your node status:
$ sudo k0s kubectl get nodes\nNAME STATUS ROLES AGE VERSION\nk0s Ready <none> 4m6s v1.28.1+k0s\n
The removal of k0s is a two-step process.
Stop the service.
sudo k0s stop\n
Execute the k0s reset
command.
The k0s reset
command cleans up the installed system service, data directories, containers, mounts and network namespaces.
sudo k0s reset\n
Reboot the system.
A few small k0s fragments persist even after the reset (for example, iptables). As such, you should initiate a reboot after the running of the k0s reset
command.
You can create a k0s cluster on top of docker. In such a scenario, by default, both controller and worker nodes are run in the same container to provide an easy local testing \"cluster\".
"},{"location":"k0s-in-docker/#prerequisites","title":"Prerequisites","text":"You will require a Docker environment running on a Mac, Windows, or Linux system.
"},{"location":"k0s-in-docker/#container-images","title":"Container images","text":"The k0s containers are published both on Docker Hub and GitHub. For reasons of simplicity, the examples given here use Docker Hub (GitHub requires a separate authentication that is not covered). Alternative links include:
Note: Due to Docker Hub tag validation scheme, we have to use -
as the k0s version separator instead of the usual +
. So for example k0s version v1.28.1+k0s.0
is tagged as docker.io/k0sproject/k0s:v1.28.1-k0s.0
.
You can run your own k0s in Docker:
docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
Note: If you are using Docker Desktop as the runtime, starting from 4.3.0 version it's using cgroups v2 in the VM that runs the engine. This means you have to add some extra flags to the above command to get kubelet and containerd to properly work with cgroups v2:
--cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw\n
"},{"location":"k0s-in-docker/#2-optional-create-additional-workers","title":"2. (Optional) Create additional workers","text":"You can attach multiple workers nodes into the cluster to then distribute your application containers to separate workers.
For each required worker:
Acquire a join token for the worker:
token=$(docker exec -t -i k0s k0s token create --role=worker)\n
Run the container to create and join the new worker:
docker run -d --name k0s-worker1 --hostname k0s-worker1 --privileged -v /var/lib/k0s docker.io/k0sproject/k0s:latest k0s worker $token\n
Access your cluster using kubectl:
docker exec k0s kubectl get nodes\n
Alternatively, grab the kubeconfig file with docker exec k0s cat /var/lib/k0s/pki/admin.conf
and paste it into Lens.
As an alternative you can run k0s using Docker Compose:
version: \"3.9\"\nservices:\nk0s:\ncontainer_name: k0s\nimage: docker.io/k0sproject/k0s:latest\ncommand: k0s controller --config=/etc/k0s/config.yaml --enable-worker\nhostname: k0s\nprivileged: true\nvolumes:\n- \"/var/lib/k0s\"\ntmpfs:\n- /run\n- /var/run\nports:\n- \"6443:6443\"\nnetwork_mode: \"bridge\"\nenvironment:\nK0S_CONFIG: |-\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\n# Any additional configuration goes here ...\n
"},{"location":"k0s-in-docker/#known-limitations","title":"Known limitations","text":""},{"location":"k0s-in-docker/#no-custom-docker-networks","title":"No custom Docker networks","text":"Currently, k0s nodes cannot be run if the containers are configured to use custom networks (for example, with --net my-net
). This is because Docker sets up a custom DNS service within the network which creates issues with CoreDNS. No completely reliable workaounds are available, however no issues should arise from running k0s cluster(s) on a bridge network.
You can manually set up k0s nodes by creating a multi-node cluster that is locally managed on each node. This involves several steps, to first install each node separately, and to then connect the node together using access tokens.
"},{"location":"k0s-multi-node/#prerequisites","title":"Prerequisites","text":"Note: Before proceeding, make sure to review the System Requirements.
Though the Manual Install material is written for Debian/Ubuntu, you can use it for any Linux distro that is running either a Systemd or OpenRC init system.
You can speed up the use of the k0s
command by enabling shell completion.
Run the k0s download script to download the latest stable version of k0s and make it executable from /usr/bin/k0s.
curl -sSLf https://get.k0s.sh | sudo sh\n
The download script accepts the following environment variables:
Variable PurposeK0S_VERSION=v1.28.1+k0s.0
Select the version of k0s to be installed DEBUG=true
Output commands and their arguments at execution. Note: If you require environment variables and use sudo, you can do:
curl -sSLf https://get.k0s.sh | sudo K0S_VERSION=v1.28.1+k0s.0 sh\n
"},{"location":"k0s-multi-node/#2-bootstrap-a-controller-node","title":"2. Bootstrap a controller node","text":"Create a configuration file:
mkdir -p /etc/k0s\nk0s config create > /etc/k0s/k0s.yaml\n
Note: For information on settings modification, refer to the configuration documentation.
sudo k0s install controller -c /etc/k0s/k0s.yaml\n
sudo k0s start\n
k0s process acts as a \"supervisor\" for all of the control plane components. In moments the control plane will be up and running.
"},{"location":"k0s-multi-node/#3-create-a-join-token","title":"3. Create a join token","text":"You need a token to join workers to the cluster. The token embeds information that enables mutual trust between the worker and controller(s) and which allows the node to join the cluster as worker.
To get a token, run the following command on one of the existing controller nodes:
sudo k0s token create --role=worker\n
The resulting output is a long token string, which you can use to add a worker to the cluster.
For enhanced security, run the following command to set an expiration time for the token:
sudo k0s token create --role=worker --expiry=100h > token-file\n
"},{"location":"k0s-multi-node/#4-add-workers-to-the-cluster","title":"4. Add workers to the cluster","text":"To join the worker, run k0s in the worker mode with the join token you created:
sudo k0s install worker --token-file /path/to/token/file\n
sudo k0s start\n
"},{"location":"k0s-multi-node/#about-tokens","title":"About tokens","text":"The join tokens are base64-encoded kubeconfigs for several reasons:
The bearer token embedded in the kubeconfig is a bootstrap token. For controller join tokens and worker join tokens k0s uses different usage attributes to ensure that k0s can validate the token role on the controller side.
"},{"location":"k0s-multi-node/#5-add-controllers-to-the-cluster","title":"5. Add controllers to the cluster","text":"Note: Either etcd or an external data store (MySQL or Postgres) via kine must be in use to add new controller nodes to the cluster. Pay strict attention to the high availability configuration and make sure the configuration is identical for all controller nodes.
To create a join token for the new controller, run the following command on an existing controller:
sudo k0s token create --role=controller --expiry=1h > token-file\n
On the new controller, run:
sudo k0s install controller --token-file /path/to/token/file -c /etc/k0s/k0s.yaml\n
Important notice here is that each controller in the cluster must have k0s.yaml otherwise some cluster nodes will use default config values which will lead to inconsistency behavior. If your configuration file includes IP addresses (node address, sans, etcd peerAddress), remember to update them accordingly for this specific controller node.
sudo k0s start\n
"},{"location":"k0s-multi-node/#6-check-k0s-status","title":"6. Check k0s status","text":"To get general information about your k0s instance's status:
sudo k0s status\n
Version: v1.28.1+k0s.0\nProcess ID: 2769\nParent Process ID: 1\nRole: controller\nInit System: linux-systemd\nService file: /etc/systemd/system/k0scontroller.service\n
"},{"location":"k0s-multi-node/#7-access-your-cluster","title":"7. Access your cluster","text":"Use the Kubernetes 'kubectl' command-line tool that comes with k0s binary to deploy your application or check your node status:
sudo k0s kubectl get nodes\n
NAME STATUS ROLES AGE VERSION\nk0s Ready <none> 4m6s v1.28.1+k0s\n
You can also access your cluster easily with Lens, simply by copying the kubeconfig and pasting it to Lens:
sudo cat /var/lib/k0s/pki/admin.conf\n
Note: To access the cluster from an external network you must replace localhost
in the kubeconfig with the host ip address for your controller.
See the Quick Start Guide.
"},{"location":"k0sctl-install/","title":"Install using k0sctl","text":"k0sctl is a command-line tool for bootstrapping and managing k0s clusters. k0sctl connects to the provided hosts using SSH and gathers information on the hosts, with which it forms a cluster by configuring the hosts, deploying k0s, and then connecting the k0s nodes together.
With k0sctl, you can create multi-node clusters in a manner that is automatic and easily repeatable. This method is recommended for production cluster installation.
Note: The k0sctl install method is necessary for automatic upgrade.
"},{"location":"k0sctl-install/#prerequisites","title":"Prerequisites","text":"You can execute k0sctl on any system that supports the Go language. Pre-compiled k0sctl binaries are available on the k0sctl releases page).
Note: For target host prerequisites information, refer to the k0s System Requirements.
"},{"location":"k0sctl-install/#install-k0s","title":"Install k0s","text":""},{"location":"k0sctl-install/#1-install-k0sctl-tool","title":"1. Install k0sctl tool","text":"k0sctl is a single binary, the instructions for downloading and installing of which are available in the k0sctl github repository.
"},{"location":"k0sctl-install/#2-configure-the-cluster","title":"2. Configure the cluster","text":"Run the following command to create a k0sctl configuration file:
k0sctl init > k0sctl.yaml\n
This action creates a k0sctl.yaml
file in the current directory:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nhosts:\n- role: controller\nssh:\naddress: 10.0.0.1 # replace with the controller's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n- role: worker\nssh:\naddress: 10.0.0.2 # replace with the worker's IP address\nuser: root\nkeyPath: ~/.ssh/id_rsa\n
Provide each host with a valid IP address that is reachable by k0ctl, and the connection details for an SSH connection.
Note: Refer to the k0sctl documentation for k0sctl configuration specifications.
"},{"location":"k0sctl-install/#3-deploy-the-cluster","title":"3. Deploy the cluster","text":"Run k0sctl apply
to perform the cluster deployment:
k0sctl apply --config k0sctl.yaml\n
\u2800\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u2800\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\n\nINFO k0sctl 0.0.0 Copyright 2021, Mirantis Inc.\nINFO Anonymized telemetry will be sent to Mirantis.\nINFO By continuing to use k0sctl you agree to these terms:\nINFO https://k0sproject.io/licenses/eula\nINFO ==> Running phase: Connect to hosts\nINFO [ssh] 10.0.0.1:22: connected\nINFO [ssh] 10.0.0.2:22: connected\nINFO ==> Running phase: Detect host operating systems\nINFO [ssh] 10.0.0.1:22: is running Ubuntu 20.10\nINFO [ssh] 10.0.0.2:22: is running Ubuntu 20.10\nINFO ==> Running phase: Prepare hosts\nINFO [ssh] 10.0.0.1:22: installing kubectl\nINFO ==> Running phase: Gather host facts\nINFO [ssh] 10.0.0.1:22: discovered 10.12.18.133 as private address\nINFO ==> Running phase: Validate hosts\nINFO ==> Running phase: Gather k0s facts\nINFO ==> Running phase: Download K0s on the hosts\nINFO [ssh] 10.0.0.2:22: downloading k0s 0.11.0\nINFO [ssh] 10.0.0.1:22: downloading k0s 0.11.0\nINFO ==> Running phase: Configure K0s\nWARN [ssh] 10.0.0.1:22: generating default configuration\nINFO [ssh] 10.0.0.1:22: validating configuration\nINFO [ssh] 10.0.0.1:22: configuration was changed\nINFO ==> Running phase: Initialize K0s Cluster\nINFO [ssh] 10.0.0.1:22: installing k0s controller\nINFO [ssh] 10.0.0.1:22: waiting for the k0s service to start\nINFO [ssh] 10.0.0.1:22: waiting for kubernetes api to respond\nINFO ==> Running phase: Install workers\nINFO [ssh] 10.0.0.1:22: generating token\nINFO [ssh] 10.0.0.2:22: writing join token\nINFO [ssh] 10.0.0.2:22: installing k0s worker\nINFO [ssh] 10.0.0.2:22: starting service\nINFO [ssh] 10.0.0.2:22: waiting for node to become ready\nINFO ==> Running phase: Disconnect from hosts\nINFO ==> Finished in 2m2s\nINFO k0s cluster version 0.11.0 is now installed\nINFO Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO k0sctl kubeconfig\n
"},{"location":"k0sctl-install/#4-access-the-cluster","title":"4. Access the cluster","text":"To access your k0s cluster, use k0sctl to generate a kubeconfig
for the purpose.
k0sctl kubeconfig > kubeconfig\n
With the kubeconfig
, you can access your cluster using either kubectl or Lens.
kubectl get pods --kubeconfig kubeconfig -A\n
NAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system calico-kube-controllers-5f6546844f-w8x27 1/1 Running 0 3m50s\nkube-system calico-node-vd7lx 1/1 Running 0 3m44s\nkube-system coredns-5c98d7d4d8-tmrwv 1/1 Running 0 4m10s\nkube-system konnectivity-agent-d9xv2 1/1 Running 0 3m31s\nkube-system kube-proxy-xp9r9 1/1 Running 0 4m4s\nkube-system metrics-server-6fbcd86f7b-5frtn 1/1 Running 0 3m51s\n
"},{"location":"k0sctl-install/#known-limitations","title":"Known limitations","text":"Included with k0s, Manifest Deployer is one of two methods you can use to run k0s with your preferred extensions (the other being by defining your extensions as Helm charts).
"},{"location":"manifests/#overview","title":"Overview","text":"Manifest Deployer runs on the controller nodes and provides an easy way to automatically deploy manifests at runtime.
By default, k0s reads all manifests under /var/lib/k0s/manifests
and ensures that their state matches the cluster state. Moreover, on removal of a manifest file, k0s will automatically prune all of it associated resources.
The use of Manifest Deployer is quite similar to the use the kubectl apply
command. The main difference between the two is that Manifest Deployer constantly monitors the directory for changes, and thus you do not need to manually apply changes that are made to the manifest files.
/var/lib/k0s/manifests
is considered to be its own \"stack\". Nested directories (further subfolders), however, are excluded from the stack mechanism and thus are not automatically deployed by the Manifest Deployer.To try Manifest Deployer, create a new folder under /var/lib/k0s/manifests
and then create a manifest file (such as nginx.yaml
) with the following content:
apiVersion: v1\nkind: Namespace\nmetadata:\nname: nginx\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx-deployment\nnamespace: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nreplicas: 3\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- name: nginx\nimage: nginx:latest\nports:\n- containerPort: 80\n
New pods will appear soon thereafter.
sudo k0s kubectl get pods --namespace nginx\n
NAME READY STATUS RESTARTS AGE\nnginx-deployment-66b6c48dd5-8zq7d 1/1 Running 0 10m\nnginx-deployment-66b6c48dd5-br4jv 1/1 Running 0 10m\nnginx-deployment-66b6c48dd5-sqvhb 1/1 Running 0 10m\n
"},{"location":"networking/","title":"Networking","text":""},{"location":"networking/#in-cluster-networking","title":"In-cluster networking","text":"k0s supports two Container Network Interface (CNI) providers out-of-box, Kube-router and Calico. In addition, k0s can support your own CNI configuration.
"},{"location":"networking/#notes","title":"Notes","text":"Kube-router is built into k0s, and so by default the distribution uses it for network provision. Kube-router uses the standard Linux networking stack and toolset, and you can set up CNI networking without any overlays by using BGP as the main mechanism for in-cluster networking.
In addition to Kube-router, k0s also offers Calico as an alternative, built-in network provider. Calico is a layer 3 container networking solution that routes packets to pods. It supports, for example, pod-specific network policies that help to secure kubernetes clusters in demanding use cases. Calico uses the vxlan overlay network by default, and you can configure it to support ipip (IP-in-IP).
You can opt-out of having k0s manage the network setup and choose instead to use any network plugin that adheres to the CNI specification. To do so, configure custom
as the network provider in the k0s configuration file (k0s.yaml
). You can do this, for example, by pushing network provider manifests into /var/lib/k0s/manifests
, from where k0s controllers will collect them for deployment into the cluster (for more information, refer to Manifest Deployer.
One goal of k0s is to allow for the deployment of an isolated control plane, which may prevent the establishment of an IP route between controller nodes and the pod network. Thus, to enable this communication path (which is mandated by conformance tests), k0s deploys Konnectivity service to proxy traffic from the API server (control plane) into the worker nodes. This ensures that we can always fulfill all the Kubernetes API functionalities, but still operate the control plane in total isolation from the workers.
Note: To allow Konnectivity agents running on the worker nodes to establish the connection, configure your firewalls for outbound access, port 8132. Moreover, configure your firewalls for outbound access, port 6443, in order to access Kube-API from the worker nodes.
"},{"location":"networking/#required-ports-and-protocols","title":"Required ports and protocols","text":"Protocol Port Service Direction Notes TCP 2380 etcd peers controller <-> controller TCP 6443 kube-apiserver Worker, CLI => controller Authenticated Kube API using Kube TLS client certs, ServiceAccount tokens with RBAC TCP 179 kube-router worker <-> worker BGP routing sessions between peers UDP 4789 Calico worker <-> worker Calico VXLAN overlay TCP 10250 kubelet Master, Worker => Host*
Authenticated kubelet API for the master node kube-apiserver
(and heapster
/metrics-server
addons) using TLS client certs TCP 9443 k0s-api controller <-> controller k0s controller join API, TLS with token auth TCP 8132 konnectivity worker <-> controller Konnectivity is used as \"reverse\" tunnel between kube-apiserver and worker kubelets"},{"location":"networking/#iptables","title":"iptables","text":"iptables
can work in two distinct modes, legacy
and nftables
. k0s autodetects the mode and prefers nftables
. To check which mode k0s is configured with check ls -lah /var/lib/k0s/bin/
. The iptables
link target reveals the mode which k0s selected. k0s has the same logic as other k8s components, but to ensure al component have picked up the same mode you can check via: kube-proxy: nsenter -t $(pidof kube-proxy) -m iptables -V
kube-router: nsenter -t $(pidof kube-router) -m /sbin/iptables -V
calico: nsenter -t $(pidof -s calico-node) -m iptables -V
There are known version incompatibility issues in iptables versions. k0s ships (in /var/lib/k0s/bin
) a version of iptables that is tested to interoperate with all other Kubernetes components it ships with. However if you have other tooling (firewalls etc.) on your hosts that uses iptables and the host iptables version is different that k0s (and other k8s components) ships with it may cause networking issues. This is based on the fact that iptables being user-space tooling it does not provide any strong version compatibility guarantees.
If you are using firewalld
on your hosts you need to ensure it is configured to use the same FirewallBackend
as k0s and other Kubernetes components use. Otherwise networking will be broken in various ways.
Here's an example configuration for a tested working networking setup:
[root@rhel-test ~]# firewall-cmd --list-all\npublic (active)\ntarget: default\n icmp-block-inversion: no\n interfaces: eth0\n sources: 10.244.0.0/16 10.96.0.0/12\n services: cockpit dhcpv6-client ssh\n ports: 80/tcp 6443/tcp 8132/tcp 10250/tcp 179/tcp 179/udp\n protocols: forward: no\n masquerade: yes\n forward-ports: source-ports: icmp-blocks: rich rules:\n
"},{"location":"nllb/","title":"Node-local load balancing","text":"Note: This feature is experimental! Expect instabilities and/or breaking changes.
For clusters that don't have an externally managed load balancer for the k0s control plane, there is another option to get a highly available control plane, at least from within the cluster. K0s calls this \"node-local load balancing\". In contrast to an externally managed load balancer, node-local load balancing takes place exclusively on the worker nodes. It does not contribute to making the control plane highly available to the outside world (e.g. humans interacting with the cluster using management tools such as Lens or kubectl
), but rather makes the cluster itself internally resilient to controller node outages.
The k0s worker process manages a load balancer on each worker node's loopback interface and configures the relevant components to use that load balancer. This allows for requests from worker components to the control plane to be distributed among all currently available controller nodes, rather than being directed to the controller node that has been used to join a particular worker into the cluster. This improves the reliability and fault tolerance of the cluster in case a controller node becomes unhealthy.
Envoy is the only load balancer that is supported so far. Please note that Envoy is not available on ARMv7, so node-local load balancing is currently unavailable on that platform.
"},{"location":"nllb/#enabling-in-a-cluster","title":"Enabling in a cluster","text":"In order to use node-local load balancing, the cluster needs to comply with the following:
spec.api.externalAddress
.--single
flag.Add the following to the cluster configuration (k0s.yaml
):
spec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n
Or alternatively, if using k0sctl
, add the following to the k0sctl configuration (k0sctl.yaml
):
spec:\nk0s:\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\n
All newly added worker nodes will then use node-local load balancing. The k0s worker process on worker nodes that are already running must be restarted for the new configuration to take effect.
"},{"location":"nllb/#full-example-using-k0sctl","title":"Full example usingk0sctl
","text":"The following example shows a full k0sctl
configuration file featuring three controllers and two workers with node-local load balancing enabled:
apiVersion: k0sctl.k0sproject.io/v1beta1\nkind: Cluster\nmetadata:\nname: k0s-cluster\nspec:\nk0s:\nversion: v1.28.1+k0s.0\nconfig:\nspec:\nnetwork:\nnodeLocalLoadBalancing:\nenabled: true\ntype: EnvoyProxy\nhosts:\n- role: controller\nssh:\naddress: 10.81.146.254\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.184\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: controller\nssh:\naddress: 10.81.146.113\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.198\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n- role: worker\nssh:\naddress: 10.81.146.51\nkeyPath: k0s-ssh-private-key.pem\nport: 22\nuser: k0s\n
Save the above configuration into a file called k0sctl.yaml
and apply it in order to bootstrap the cluster:
$ k0sctl apply\n\u28ff\u28ff\u2847\u2800\u2800\u2880\u28f4\u28fe\u28ff\u281f\u2801\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u287f\u281b\u2801\u2800\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u28e0\u28f6\u28ff\u287f\u280b\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u2800\u2800\u28e0\u2800\u2800\u2880\u28e0\u2846\u28b8\u28ff\u28ff\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u28ff\u28ff\u28df\u280b\u2800\u2800\u2800\u2800\u2800\u28b8\u28ff\u2847\u2800\u28b0\u28fe\u28ff\u2800\u2800\u28ff\u28ff\u2847\u28b8\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u284f\u283b\u28ff\u28f7\u28e4\u2840\u2800\u2800\u2800\u2838\u281b\u2801\u2800\u2838\u280b\u2801\u2800\u2800\u28ff\u28ff\u2847\u2808\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u2809\u28b9\u28ff\u28ff\u2800\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\n\u28ff\u28ff\u2847\u2800\u2800\u2819\u28bf\u28ff\u28e6\u28c0\u2800\u2800\u2800\u28e0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28ff\u28ff\u2847\u28b0\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28f6\u28fe\u28ff\u28ff\u2800\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\nk0sctl 0.15.0 Copyright 2022, k0sctl authors.\nBy continuing to use k0sctl you agree to these terms:\nhttps://k0sproject.io/licenses/eula\nlevel=info msg=\"==> Running phase: Connect to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: connected\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: connected\"\nlevel=info msg=\"==> Running phase: Detect host operating systems\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: is running Alpine Linux v3.17\"\nlevel=info msg=\"==> Running phase: Acquire exclusive host lock\"\nlevel=info msg=\"==> Running phase: Prepare hosts\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing packages (curl)\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing packages (curl, iptables)\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing packages (curl)\"\nlevel=info msg=\"==> Running phase: Gather host facts\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: using k0s-controller-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: using k0s-worker-1 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: using k0s-worker-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: using k0s-controller-2 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: using k0s-controller-0 as hostname\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: discovered eth0 as private interface\"\nlevel=info 
msg=\"[ssh] 10.81.146.198:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: discovered eth0 as private interface\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: discovered eth0 as private interface\"\nlevel=info msg=\"==> Running phase: Download k0s binaries to local host\"\nlevel=info msg=\"==> Running phase: Validate hosts\"\nlevel=info msg=\"==> Running phase: Gather k0s facts\"\nlevel=info msg=\"==> Running phase: Validate facts\"\nlevel=info msg=\"==> Running phase: Upload k0s binaries to hosts\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: uploading k0s binary from /home/k0sctl/.cache/k0sctl/k0s/linux/amd64/k0s-v1.28.1+k0s.0\"\nlevel=info msg=\"==> Running phase: Configure k0s\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: validating configuration\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: configuration was changed\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: configuration was changed\"\nlevel=info msg=\"==> Running phase: Initialize the k0s cluster\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install controllers\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.184:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: installing k0s controller\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for the k0s service to start\"\nlevel=info msg=\"[ssh] 10.81.146.113:22: waiting for kubernetes api to respond\"\nlevel=info msg=\"==> Running phase: Install workers\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: validating api connection to https://10.81.146.254:6443\"\nlevel=info msg=\"[ssh] 10.81.146.254:22: generating token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: writing join token\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: installing k0s worker\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: starting service\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: starting 
service\"\nlevel=info msg=\"[ssh] 10.81.146.198:22: waiting for node to become ready\"\nlevel=info msg=\"[ssh] 10.81.146.51:22: waiting for node to become ready\"\nlevel=info msg=\"==> Running phase: Release exclusive host lock\"\nlevel=info msg=\"==> Running phase: Disconnect from hosts\"\nlevel=info msg=\"==> Finished in 3m30s\"\nlevel=info msg=\"k0s cluster version v1.28.1+k0s.0 is now installed\"\nlevel=info msg=\"Tip: To access the cluster you can now fetch the admin kubeconfig using:\"\nlevel=info msg=\" k0sctl kubeconfig\"\n
The cluster with the two nodes should be available by now. Setup the kubeconfig file in order to interact with it:
k0sctl kubeconfig > k0s-kubeconfig\nexport KUBECONFIG=$(pwd)/k0s-kubeconfig\n
The three controllers are available and provide API Server endpoints:
$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME HOLDER AGE\nk0s-ctrl-k0s-controller-0 9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1 2m37s\nk0s-ctrl-k0s-controller-1 fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97 2m18s\nk0s-ctrl-k0s-controller-2 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 2m9s\nk0s-endpoint-reconciler 9ec2b221890e5ed6f4cc70377bfe809fef5be541a2774dc5de81db7acb2786f1 2m37s\n\n$ kubectl -n default get endpoints\nNAME ENDPOINTS AGE\nkubernetes 10.81.146.113:6443,10.81.146.184:6443,10.81.146.254:6443 2m49s\n
The first controller is the current k0s leader. The two worker nodes can be listed, too:
$ kubectl get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-worker-0 Ready <none> 2m16s v1.28.1+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\nk0s-worker-1 Ready <none> 2m15s v1.28.1+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\n
There is one node-local load balancer pod running for each worker node:
$ kubectl -n kube-system get pod -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnllb-k0s-worker-0 1/1 Running 0 81s 10.81.146.198 k0s-worker-0 <none> <none>\nnllb-k0s-worker-1 1/1 Running 0 85s 10.81.146.51 k0s-worker-1 <none> <none>\n
The cluster is using node-local load balancing and is able to tolerate the outage of one controller node. Shutdown the first controller to simulate a failure condition:
$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.254 'echo \"Powering off $(hostname) ...\" && sudo poweroff'\nPowering off k0s-controller-0 ...\n
Node-local load balancing provides high availability from within the cluster, not from the outside. The generated kubeconfig file lists the first controller's IP as the Kubernetes API server address by default. As this controller is gone by now, a subsequent call to kubectl
will fail:
$ kubectl get nodes\nUnable to connect to the server: dial tcp 10.81.146.254:6443: connect: no route to host\n
Changing the server address in k0s-kubeconfig
from the first controller to another one makes the cluster accessible again. Pick one of the other controller IP addresses and put that into the kubeconfig file. The addresses are listed both in k0sctl.yaml
as well as in the output of kubectl -n default get endpoints
above.
$ ssh -i k0s-ssh-private-key.pem k0s@10.81.146.184 hostname\nk0s-controller-1\n\n$ sed -i s#https://10\\\\.81\\\\.146\\\\.254:6443#https://10.81.146.184:6443#g k0s-kubeconfig\n\n$ kubectl get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-worker-0 Ready <none> 3m35s v1.28.1+k0s 10.81.146.198 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\nk0s-worker-1 Ready <none> 3m34s v1.28.1+k0s 10.81.146.51 <none> Alpine Linux v3.17 5.15.83-0-virt containerd://1.7.1\n\n$ kubectl -n kube-system get pods -owide -l app.kubernetes.io/managed-by=k0s,app.kubernetes.io/component=nllb\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnllb-k0s-worker-0 1/1 Running 0 2m31s 10.81.146.198 k0s-worker-0 <none> <none>\nnllb-k0s-worker-1 1/1 Running 0 2m35s 10.81.146.51 k0s-worker-1 <none> <none>\n
The first controller is no longer active. Its IP address is not listed in the default/kubernetes
Endpoints resource and its k0s controller lease is orphaned:
$ kubectl -n default get endpoints\nNAME ENDPOINTS AGE\nkubernetes 10.81.146.113:6443,10.81.146.184:6443 3m56s\n\n$ kubectl -n kube-node-lease get \\\nlease/k0s-ctrl-k0s-controller-0 \\\nlease/k0s-ctrl-k0s-controller-1 \\\nlease/k0s-ctrl-k0s-controller-2 \\\nlease/k0s-endpoint-reconciler\nNAME HOLDER AGE\nk0s-ctrl-k0s-controller-0 4m47s\nk0s-ctrl-k0s-controller-1 fe45284924abb1bfce674e5a9aa8d647f17c81e53bbab17cf28288f13d5e8f97 4m28s\nk0s-ctrl-k0s-controller-2 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 4m19s\nk0s-endpoint-reconciler 5ab43278e63fc863b2a7f0fe1aab37316a6db40c5a3d8a17b9d35b5346e23b3d 4m47s\n
Despite that controller being unavailable, the cluster remains operational. The third controller has become the new k0s leader. Workloads will run just fine:
$ kubectl -n default run nginx --image=nginx\npod/nginx created\n\n$ kubectl -n default get pods -owide\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nnginx 1/1 Running 0 16s 10.244.0.5 k0s-worker-1 <none> <none>\n\n$ kubectl -n default logs nginx\n/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration\n/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh\n10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf\n10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh\n/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh\n/docker-entrypoint.sh: Configuration complete; ready for start up\n[notice] 1#1: using the \"epoll\" event method\n[notice] 1#1: nginx/1.23.3\n[notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)\n[notice] 1#1: OS: Linux 5.15.83-0-virt\n[notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576\n[notice] 1#1: start worker processes\n[notice] 1#1: start worker process 28\n
"},{"location":"podsecurity/","title":"Pod Security Standards","text":"Since Pod Security Policies have been removed in Kubernetes v1.25, Kubernetes offers Pod Security Standards \u2013 a new way to enhance cluster security.
To enable PSS in k0s you need to create an admission controller config file:
```yaml\napiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: PodSecurity\n configuration:\n apiVersion: pod-security.admission.config.k8s.io/v1beta1\n kind: PodSecurityConfiguration\n # Defaults applied when a mode label is not set.\n defaults:\n enforce: \"privileged\"\n enforce-version: \"latest\"\n exemptions:\n # Don't forget to exempt namespaces or users that are responsible for deploying\n # cluster components, because they need to run privileged containers\n usernames: [\"admin\"]\n namespaces: [\"kube-system\"]\n```\n
Add these extra arguments to the k0s configuration:
```yaml\napiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\n api:\n extraArgs:\n admission-control-config-file: /path/to/admission/control/config.yaml\n```\n
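The configuration above only sets a cluster-wide default of privileged. As a hedged illustration of how the standards are then applied in practice (the namespace name my-app is just an example), individual namespaces can be opted into a stricter profile with the standard Pod Security Admission labels:

```shell
# Enforce the "baseline" profile in the example namespace "my-app"
# and emit warnings for workloads that would violate "restricted".
kubectl label namespace my-app \
  pod-security.kubernetes.io/enforce=baseline \
  pod-security.kubernetes.io/enforce-version=latest \
  pod-security.kubernetes.io/warn=restricted
```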
"},{"location":"raspberry-pi4/","title":"Create a Raspberry Pi 4 cluster","text":""},{"location":"raspberry-pi4/#prerequisites","title":"Prerequisites","text":"This guide assumes that you use a Raspberry Pi 4 Model B computer and a sufficiently large SD card of at least 32 GB. We will be using Ubuntu Linux for this guide, although k0s should run quite fine on other 64-bit Linux distributions for the Raspberry Pi as well. Please file a Bug if you encounter any obstacles.
"},{"location":"raspberry-pi4/#set-up-the-system","title":"Set up the system","text":""},{"location":"raspberry-pi4/#prepare-sd-card-and-boot-up-the-raspberry-pi","title":"Prepare SD card and boot up the Raspberry Pi","text":"Install Ubuntu Server 22.04.1 LTS 64-bit for Raspberry Pi. Ubuntu provides a step by step guide for the installation process. They use Raspberry Pi Imager, a specialized imaging utility that you can use to write the Ubuntu image, amongst others, to your SD cards. Follow that guide to get a working installation. (You can skip part 5 of the guide, since we won't need a Desktop Environment to run k0s.)
Alternatively, you can also opt to download the Ubuntu server image for Raspberry Pi manually and write it to an SD card using a tool like dd
:
wget https://cdimage.ubuntu.com/releases/22.04.1/release/ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\nunxz ubuntu-22.04.1-preinstalled-server-arm64+raspi.img.xz\ndd if=ubuntu-22.04.1-preinstalled-server-arm64+raspi.img of=/dev/mmcblk0 bs=4M status=progress\n
Note: The manual process is more prone to accidental data loss than the guided one via Raspberry Pi Imager. Be sure to choose the correct device names. The previous content of the SD card will be wiped. Moreover, the partition written to the SD card needs to be resized to make the full capacity of the card available to Ubuntu. This can be achieved, for example, in this way:
growpart /dev/mmcblk0 2\nresize2fs /dev/mmcblk0p2\n
Ubuntu uses cloud-init to allow for automated customizations of the system configuration. The cloud-init configuration files are located on the boot partition of the SD card. You can mount that partition and modify those, e.g. to provision network configuration, users, authorized SSH keys, additional packages and also an automatic installation of k0s.
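As a minimal sketch of such a customization (all values below are placeholders, not taken from the official guide), the user-data file on the boot partition could look like this:

```yaml
#cloud-config
hostname: k0s-worker-0            # example hostname
users:
  - name: ubuntu
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    ssh_authorized_keys:
      - ssh-ed25519 AAAA... you@example.com   # replace with your public key
package_update: true
packages:
  - curl
runcmd:
  - curl -sSLf https://get.k0s.sh | sh        # optionally pre-install k0s on first boot
```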
After you have prepared the SD card, plug it into the Raspberry Pi and boot it up. Once cloud-init has finished bootstrapping the system, the default login credentials are set to user ubuntu
with password ubuntu
(which you will be prompted to change on first login).
Note: For network configuration purposes, this documentation assumes that all of your computers are connected on the same subnet.
Review k0s's required ports and protocols to ensure that your network and firewall configurations allow necessary traffic for the cluster.
Review the Ubuntu Server Networking Configuration documentation to ensure that all systems have a static IP address on the network, or that the network is providing a static DHCP lease for the nodes. If the network should be managed via cloud-init, please refer to their documentation.
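For example, on Ubuntu a static address could be set with a netplan file such as /etc/netplan/99-k0s-static.yaml (the interface name and addresses below are placeholders) and applied with sudo netplan apply:

```yaml
network:
  version: 2
  ethernets:
    eth0:                          # adjust to your interface name
      dhcp4: false
      addresses: [192.168.1.10/24]
      routes:
        - to: default
          via: 192.168.1.1
      nameservers:
        addresses: [192.168.1.1]
```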
"},{"location":"raspberry-pi4/#optional-provision-ssh-keys","title":"(Optional) Provision SSH keys","text":"Ubuntu Server deploys and enables OpenSSH via cloud-init by default. Confirm, though, that for whichever user you will deploy the cluster with on the build system, their SSH Key is copied to each node's root user. Before you start, the configuration should be such that the current user can run:
ssh root@${HOST}\n
Where ${HOST}
is any node and the login can succeed with no further prompts.
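If the key still needs to be distributed, one possible way to do that (the host addresses are examples, and root logins must be permitted during setup) is:

```shell
# Copy the current user's public key to the root account of every node
for HOST in 192.168.1.10 192.168.1.11 192.168.1.12; do
  ssh-copy-id root@${HOST}
done
```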
While having a swap file is technically optional, it can help to ease memory pressure when running memory intensive workloads or on Raspberry Pis with less than 8 GB of RAM.
To create a swap file:
fallocate -l 2G /swapfile && \\\nchmod 0600 /swapfile && \\\nmkswap /swapfile && \\\nswapon -a\n
Ensure that the usage of swap is not too aggressive by running sudo sysctl vm.swappiness=10
(the default is generally higher) and configuring it to be persistent in /etc/sysctl.d/*
.
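For example (the file name is just a suggestion):

```shell
# Apply immediately and persist the setting across reboots
sudo sysctl vm.swappiness=10
echo 'vm.swappiness=10' | sudo tee /etc/sysctl.d/99-swappiness.conf
```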
Ensure that your swap is mounted after reboots by confirming that the following line exists in your /etc/fstab
configuration:
/swapfile none swap sw 0 0\n
Download a k0s release. For example:
wget -O /tmp/k0s https://github.com/k0sproject/k0s/releases/download/v1.28.1+k0s.0/k0s-v1.28.1+k0s.0-arm64 # replace version number!\nsudo install /tmp/k0s /usr/local/bin/k0s\n
\u2015 or \u2015
Use the k0s download script (as one command) to download the latest stable k0s and make it executable in /usr/bin/k0s
.
curl -sSLf https://get.k0s.sh | sudo sh\n
At this point you can run k0s
:
ubuntu@ubuntu:~$ k0s version\nv1.28.1+k0s.0\n
To check if k0s's system requirements and external runtime dependencies are fulfilled by your current setup, you can invoke k0s sysinfo
:
ubuntu@ubuntu:~$ k0s sysinfo\nMachine ID: \"d84cde1f38844d1425dc04c454c5aa95e41fb11115bbb141c016f4cd3dea4f51\" (from machine) (pass)\nTotal memory: 3.7 GiB (pass)\nDisk space available for /var/lib/k0s: 24.3 GiB (pass)\nOperating system: Linux (pass)\n Linux kernel release: 5.15.0-1013-raspi (pass)\n Max. file descriptors per process: current: 1024 / max: 1048576 (warning: < 65536)\n AppArmor: unavailable (pass)\n Executable in path: modprobe: /usr/sbin/modprobe (pass)\n /proc file system: mounted (0x9fa0) (pass)\n Control Groups: version 2 (pass)\n cgroup controller \"cpu\": available (pass)\n cgroup controller \"cpuacct\": available (via cpu in version 2) (pass)\n cgroup controller \"cpuset\": available (pass)\n cgroup controller \"memory\": available (pass)\n cgroup controller \"devices\": available (assumed) (pass)\n cgroup controller \"freezer\": available (assumed) (pass)\n cgroup controller \"pids\": available (pass)\n cgroup controller \"hugetlb\": available (pass)\n cgroup controller \"blkio\": available (via io in version 2) (pass)\n CONFIG_CGROUPS: Control Group support: built-in (pass)\n CONFIG_CGROUP_FREEZER: Freezer cgroup subsystem: built-in (pass)\n CONFIG_CGROUP_PIDS: PIDs cgroup subsystem: built-in (pass)\n CONFIG_CGROUP_DEVICE: Device controller for cgroups: built-in (pass)\n CONFIG_CPUSETS: Cpuset support: built-in (pass)\n CONFIG_CGROUP_CPUACCT: Simple CPU accounting cgroup subsystem: built-in (pass)\n CONFIG_MEMCG: Memory Resource Controller for Control Groups: built-in (pass)\n CONFIG_CGROUP_HUGETLB: HugeTLB Resource Controller for Control Groups: built-in (pass)\n CONFIG_CGROUP_SCHED: Group CPU scheduler: built-in (pass)\n CONFIG_FAIR_GROUP_SCHED: Group scheduling for SCHED_OTHER: built-in (pass)\n CONFIG_CFS_BANDWIDTH: CPU bandwidth provisioning for FAIR_GROUP_SCHED: built-in (pass)\n CONFIG_BLK_CGROUP: Block IO controller: built-in (pass)\n CONFIG_NAMESPACES: Namespaces support: built-in (pass)\n CONFIG_UTS_NS: UTS namespace: built-in (pass)\n CONFIG_IPC_NS: IPC namespace: built-in (pass)\n CONFIG_PID_NS: PID namespace: built-in (pass)\n CONFIG_NET_NS: Network namespace: built-in (pass)\n CONFIG_NET: Networking support: built-in (pass)\n CONFIG_INET: TCP/IP networking: built-in (pass)\n CONFIG_IPV6: The IPv6 protocol: built-in (pass)\n CONFIG_NETFILTER: Network packet filtering framework (Netfilter): built-in (pass)\n CONFIG_NETFILTER_ADVANCED: Advanced netfilter configuration: built-in (pass)\n CONFIG_NF_CONNTRACK: Netfilter connection tracking support: module (pass)\n CONFIG_NETFILTER_XTABLES: Netfilter Xtables support: module (pass)\n CONFIG_NETFILTER_XT_TARGET_REDIRECT: REDIRECT target support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_COMMENT: \"comment\" match support: module (pass)\n CONFIG_NETFILTER_XT_MARK: nfmark target and match support: module (pass)\n CONFIG_NETFILTER_XT_SET: set target and match support: module (pass)\n CONFIG_NETFILTER_XT_TARGET_MASQUERADE: MASQUERADE target support: module (pass)\n CONFIG_NETFILTER_XT_NAT: \"SNAT and DNAT\" targets support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_ADDRTYPE: \"addrtype\" address type match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_CONNTRACK: \"conntrack\" connection tracking match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_MULTIPORT: \"multiport\" Multiple port match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_RECENT: \"recent\" match support: module (pass)\n CONFIG_NETFILTER_XT_MATCH_STATISTIC: \"statistic\" match support: module (pass)\n CONFIG_NETFILTER_NETLINK: module 
(pass)\n CONFIG_NF_NAT: module (pass)\n CONFIG_IP_SET: IP set support: module (pass)\n CONFIG_IP_SET_HASH_IP: hash:ip set support: module (pass)\n CONFIG_IP_SET_HASH_NET: hash:net set support: module (pass)\n CONFIG_IP_VS: IP virtual server support: module (pass)\n CONFIG_IP_VS_NFCT: Netfilter connection tracking: built-in (pass)\n CONFIG_IP_VS_SH: Source hashing scheduling: module (pass)\n CONFIG_IP_VS_RR: Round-robin scheduling: module (pass)\n CONFIG_IP_VS_WRR: Weighted round-robin scheduling: module (pass)\n CONFIG_NF_CONNTRACK_IPV4: IPv4 connetion tracking support (required for NAT): unknown (warning)\n CONFIG_NF_REJECT_IPV4: IPv4 packet rejection: module (pass)\n CONFIG_NF_NAT_IPV4: IPv4 NAT: unknown (warning)\n CONFIG_IP_NF_IPTABLES: IP tables support: module (pass)\n CONFIG_IP_NF_FILTER: Packet filtering: module (pass)\n CONFIG_IP_NF_TARGET_REJECT: REJECT target support: module (pass)\n CONFIG_IP_NF_NAT: iptables NAT support: module (pass)\n CONFIG_IP_NF_MANGLE: Packet mangling: module (pass)\n CONFIG_NF_DEFRAG_IPV4: module (pass)\n CONFIG_NF_CONNTRACK_IPV6: IPv6 connetion tracking support (required for NAT): unknown (warning)\n CONFIG_NF_NAT_IPV6: IPv6 NAT: unknown (warning)\n CONFIG_IP6_NF_IPTABLES: IP6 tables support: module (pass)\n CONFIG_IP6_NF_FILTER: Packet filtering: module (pass)\n CONFIG_IP6_NF_MANGLE: Packet mangling: module (pass)\n CONFIG_IP6_NF_NAT: ip6tables NAT support: module (pass)\n CONFIG_NF_DEFRAG_IPV6: module (pass)\n CONFIG_BRIDGE: 802.1d Ethernet Bridging: module (pass)\n CONFIG_LLC: module (pass)\n CONFIG_STP: module (pass)\n CONFIG_EXT4_FS: The Extended 4 (ext4) filesystem: built-in (pass)\n CONFIG_PROC_FS: /proc file system support: built-in (pass)\n
"},{"location":"raspberry-pi4/#deploy-a-node","title":"Deploy a node","text":"Each node can now serve as a control plane node or worker node or both.
"},{"location":"raspberry-pi4/#as-single-node","title":"As single node","text":"This is a self-contained single node setup which runs both control plane components and worker components. If you don't plan join any more nodes into the cluster, this is for you.
Install the k0scontroller
service:
ubuntu@ubuntu:~$ sudo k0s install controller --single\nubuntu@ubuntu:~$ sudo systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start it:
ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 09:56:02 UTC; 2s ago\n Docs: https://docs.k0sproject.io\n Main PID: 2720 (k0s)\n Tasks: 10\n Memory: 24.7M\n CPU: 4.654s\n CGroup: /system.slice/k0scontroller.service\n \u2514\u25002720 /usr/local/bin/k0s controller --single=true\n\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] received CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] generating key: rsa-2048\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 6275509116227039894094374442676315636193163621\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] encoded CSR\nAug 18 09:56:04 ubuntu k0s[2720]: 2022/08/18 09:56:04 [INFO] signed certificate with serial number 336800507542010809697469355930007636411790073226\n
When the cluster is up, try to have a look:
ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nubuntu Ready control-plane 4m41s v1.28.1+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system kube-proxy-kkv2l 1/1 Running 0 4m44s 10.152.56.54 ubuntu <none> <none>\nkube-system kube-router-vf2pv 1/1 Running 0 4m44s 10.152.56.54 ubuntu <none> <none>\nkube-system coredns-88b745646-wd4mp 1/1 Running 0 5m10s 10.244.0.2 ubuntu <none> <none>\nkube-system metrics-server-7d7c4887f4-ssk49 1/1 Running 0 5m6s 10.244.0.3 ubuntu <none> <none>\n
Overall, the single k0s node uses less than 1 GiB of RAM:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 715Mi 1.3Gi 3.0Mi 1.7Gi 2.8Gi\nSwap: 0B 0B 0B\n
"},{"location":"raspberry-pi4/#as-a-controller-node","title":"As a controller node","text":"This will install k0s as a single non-HA controller. It won't be able to run any workloads, so you need to connect more workers to it.
Install the k0scontroller
service. Note that we're not specifying any flags:
ubuntu@ubuntu:~$ sudo k0s install controller\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cb k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start it:
ubuntu@ubuntu:~$ sudo systemctl start k0scontroller.service\nubuntu@ubuntu:~$ systemctl status k0scontroller.service\n\u25cf k0scontroller.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0scontroller.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 10:31:07 UTC; 3s ago\n Docs: https://docs.k0sproject.io\n Main PID: 1176 (k0s)\n Tasks: 10\n Memory: 30.2M\n CPU: 8.936s\n CGroup: /system.slice/k0scontroller.service\n \u2514\u25001176 /usr/local/bin/k0s controller\n\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 723202396395786987172578079268287418983457689579\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 36297085497443583023060005045470362249819432477\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] encoded CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 728910847354665355109188021924183608444435075827\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generate received request\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] received CSR\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] generating key: rsa-2048\nAug 18 10:31:09 ubuntu k0s[1176]: 2022/08/18 10:31:09 [INFO] signed certificate with serial number 718948898553094584370065610752227487244528071083\n
As soon as the controller is up, we can try to inspect the API as we did for the single node:
ubuntu@ubuntu:~$ sudo k0s kc get nodes -owide\nNo resources found\nubuntu@ubuntu:~$ sudo k0s kc get pod -owide -A\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system coredns-88b745646-6tpwm 0/1 Pending 0 29s <none> <none> <none> <none>\nkube-system metrics-server-7d7c4887f4-9k5k5 0/1 Pending 0 24s <none> <none> <none> <none>\n
As you can see, there are no nodes and two pending pods: a control plane without workers. The memory consumption is slightly lower than that of the single node controller, but not by much:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 678Mi 2.3Gi 3.0Mi 758Mi 2.9Gi\nSwap: 0B 0B 0B\n
This controller runs a full-fledged control plane, backed by etcd, as opposed to the lightweight kine-based one from the single node example. For the latter, k0s doesn't support joining new nodes.
More nodes can be added by creating join tokens. To add a worker node, create a token for it:
ubuntu@ubuntu:~$ sudo k0s token create --role worker\nH4sIAAAAAAAC/2yV0Y6jPBKF7/MUeYGZ30DonUTai5+Ak5DgbhuXHXwHmAnBhtAJHdKs9t1XnZmRdqW9K1cdfceyrDqzvD+L6no7X7rV/O7MSvtxG6rrbTX7Nv9dr2bz+Xx+q6736rqa18PQ31Z//eWg747vfvdfvvuL1cti4T1VZXUdzj/PZT5U3/KPob5cz8PnN50P+Wp+SNFwSJ01Ax3zcxAyEUMKKqYIA3vO0LA2TpwCC1hEQipFrxD2UogDhawQobWJY297jxHBCdbS70hIvWKTOMWGBcwhgUaMSegPhdPH+VY13GDGYNxTiwONdMSEJtTiLeVYMMALDn6dOKqXtt5r0WfQPpqK43cpWKBAecnWktxEiAvWVZEDghPCorhmXTlWp/7PTPz3jEPcVZF6p0KsFfIlNZiIiB11iFUhlJ+1jkxwn/EjU4kRnnI1zsEJkkiH4OHt2pI4a0gEINZUYEEhQinEkUb4qU0Rvn+9CQD5UKJ0dKfG1NVZ2dWCcfCkHFDKycjbYZuGIsk5DngY7Svcn3N5mdIGm1yylkU+Srcxyiy7l50ZRUTvGqtcNuK9QAvEjcihu4yJh/sipC5xy4nBssut9UrcB6nENz72JnfxKLBmxAseZftgyhHvfLIjaeK+PNYX2tmwkKQrGjPlSFAI2VRKmyZmidjnsGCefRfe6Vp4p6veBk0FCtaN/uBu7JAp9kS6nFKDCQvxVUXYsGPiFji+VU05UtFvdLt8oVK8JRE+5m6fZfbvBcGa8QhH0pzG6vxjLEOSEJvtZdRvhNSywNmCejEihiRMYp/IH34utZc6GpdwWwgbc9Hhh5Q+4ushLeXJEZ6t85YBCLxTTfwmGhyWW+HC2B+AE1DnYdK4l9pYJ/P0jhn1mrsq1MbHKYqcRO6cyuAQQG/kRlsq2aOK/HVp2FZKDVRqQg0OmNuz3MTB2jgBiXSQCGHYVmN6XnoAItDIrmnbBxDFHbdqB8ZZU5ktGMRAgQUApzuH3chQ9BCSRcrBR2riVCHxBt5ln3kYlXKxKKI6JEizV4wn3tWyMMk1N/iVtvpayvqaQ+nrKfj6gxMzOOCIBF/+cBQv4JG4AnATe0GZjUNy6gcWkkG5CJGpntKGTnzb472XfeqtekuQzqsWua+bpaw2j9d0ih02YZauh5y4/v7gqZzY2lYmVuWkahFqzF0cri1jbPu3n4d6nVp10G4fVw3OZbp8VabfaQfvtWN9zYNOdfVYmIWjz4PMzOOFmv5Nb3u39CgqXdUCth4xyxrwaQ8Oc3On9xIet3mHmewCj7kJgmP/pr3os5i0oLx+1+4yyj1mcwuTmDIko50DpndhWwNxHwcQQSuEGFljI0Z7lYJ1EhgnguJ3PukPYXr3VbJYOCdE5ECSFpBqgrDEpzFzRSfFxSUgIrJhUQZxW5jazxpCk445CfK3RMbHdcOGtL2N0O7uAuyCId8A0izZ4B2EseQb55EgwVX7+CyjmB9c1eSTVQXeLWiDj4CjUW7ZXXl9nR7pqDYKUXnZqyZ4r46x98bR/vduxtzQE0UiFZHdpEACEcFzLx/o5Z+z+bzL22o1N+g2Ky/dUD2GXznxq/6VE39C46n6anzcnqePorLV8K24XIbbcM37/6V9XK9VN3z7Q3o2zbnTq/n60v08n2b9tfpZXauurG6r+b/+PfuiPs1/Q/4P/mn8vMJwMVW3mrvL84/lj+8N8ia/uZ/Lf2izWFb57D8BAAD//zANvmsEBwAA\n
Save the join token for subsequent steps.
"},{"location":"raspberry-pi4/#as-a-worker-node","title":"As a worker node","text":"To join an existing k0s cluster, create the join token file for the worker (where $TOKEN_CONTENT
is one of the join tokens created in the control plane setup):
sudo sh -c 'mkdir -p /var/lib/k0s/ && umask 077 && echo \"$TOKEN_CONTENT\" > /var/lib/k0s/join-token'\n
After that, install the k0sworker
service:
ubuntu@ubuntu:~$ sudo k0s install worker --token-file /var/lib/k0s/join-token\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cb k0sworker.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n Active: inactive (dead)\n Docs: https://docs.k0sproject.io\n
Start the service:
ubuntu@ubuntu:~$ sudo systemctl start k0sworker.service\nubuntu@ubuntu:~$ systemctl status k0sworker.service\n\u25cf k0sworker.service - k0s - Zero Friction Kubernetes\n Loaded: loaded (/etc/systemd/system/k0sworker.service; enabled; vendor preset: enabled)\n Active: active (running) since Thu 2022-08-18 13:48:58 UTC; 2s ago\n Docs: https://docs.k0sproject.io\n Main PID: 1631 (k0s)\n Tasks: 22\n Memory: 181.7M\n CPU: 4.010s\n CGroup: /system.slice/k0sworker.service\n \u251c\u25001631 /usr/local/bin/k0s worker --token-file=/var/lib/k0s/join-token\n \u2514\u25001643 /var/lib/k0s/bin/containerd --root=/var/lib/k0s/containerd --state=/run/k0s/containerd --address=/run/k0s/containerd.sock --log-level=info --config=/etc/k0s/containerd.toml\n\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1643\" component=containerd\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting OCIBundleReconciler\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting kubelet\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: /run/systemd/resolve/resolv.conf\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Starting to supervise\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"Started successfully, go nuts pid 1648\" component=kubelet\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Status\"\nAug 18 13:49:00 ubuntu k0s[1631]: time=\"2022-08-18 13:49:00\" level=info msg=\"starting Autopilot\"\n
As this is a worker node, we cannot access the Kubernetes API via the builtin k0s kc
subcommand, but we can check the k0s API instead:
ubuntu@ubuntu:~$ sudo k0s status\nVersion: v1.28.1+k0s.0\nProcess ID: 1631\nRole: worker\nWorkloads: true\nSingleNode: false\n
The memory requirements are also pretty low:
ubuntu@ubuntu:~$ free -h\n total used free shared buff/cache available\nMem: 3.7Gi 336Mi 2.1Gi 3.0Mi 1.2Gi 3.2Gi\nSwap: 0B 0B 0B\n
"},{"location":"raspberry-pi4/#connect-to-the-cluster","title":"Connect to the cluster","text":"On a controller node, generate a new raspi-cluster-master
user with admin rights and get a kubeconfig for it:
ubuntu@ubuntu:~$ sudo k0s kc create clusterrolebinding raspi-cluster-master-admin --clusterrole=admin --user=raspi-cluster-master\nclusterrolebinding.rbac.authorization.k8s.io/raspi-cluster-master-admin created\nubuntu@ubuntu:~$ sudo k0s kubeconfig create --groups system:masters raspi-cluster-master\n\napiVersion: v1\nclusters:\n- cluster:\n server: https://10.152.56.54:6443\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURBRENDQWVpZ0F3SUJBZ0lVT2RSVzdWdm83UWR5dmdFZHRUK1V3WDN2YXdvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5EQTFNREJhRncwegpNakE0TVRVeE5EQTFNREJhTUJneEZqQVVCZ05WQkFNVERXdDFZbVZ5Ym1WMFpYTXRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURsdy8wRFJtcG1xRjVnVElmN1o5bElRN0RFdUp6WDJLN1MKcWNvYk5oallFanBqbnBDaXFYOSt5T1R2cGgyUlRKN2tvaGkvUGxrYm5oM2pkeVQ3NWxSMGowSkV1elRMaUdJcApoR2pqc3htek5RRWVwb210R0JwZXNGeUE3NmxTNVp6WVJtT0lFQVgwb0liWjBZazhuU3pQaXBsWDMwcTFETEhGCkVIcSsyZG9vVXRIb09EaEdmWFRJTUJsclZCV3dCV3cxbmdnN0dKb01TN2tHblpYaUw2NFBiRDg5NmtjYXo0a28KTXhhZGc1ZmZQNStBV3JIVHhKV1d2YjNCMjEyOWx3R3FiOHhMTCt1cnVISHVjNEh4em9OVUt1WUlXc2lvQWp4YgphdDh6M1QwV2RnSit2VithWWlRNFlLeEVFdFB4cEMvUHk0czU0UHF4RzVZa0hiMDczMEUxQWdNQkFBR2pRakJBCk1BNEdBMVVkRHdFQi93UUVBd0lCQmpBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTd2p4STIKRUxVNCtNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQ3k3dHFFMk5WT3E0Z0I1Ngp2clVZMFU0SWp1c0dUN0UzQ2xqSUtQODk2Mm9xdlpvU0NWb2U5YS9UQTR6ZXYrSXJwaTZ1QXFxc3RmT3JFcDJ4CmVwMWdYZHQrbG5nV0xlbXdWdEVOZ0xvSnBTM09Vc3N1ai9XcmJwSVU4M04xWVJTRzdzU21KdXhpa3pnVUhiUk8KZ01SLzIxSDFESzJFdmdQY2pHWXlGbUQzSXQzSjVNcnNiUHZTRG4rUzdWWWF0eWhIMUo4dmwxVDFpbzRWWjRTNgpJRFlaV05JOU10TUpqcGxXL01pRnlwTUhFU1E3UEhHeHpGVExoWFplS0pKSlRPYXFha1AxM3J1WFByVHVDQkl4CkFCSWQraU9qdGhSU3ZxbTFocGtHcmY4Rm9PdG1PYXZmazdDdnNJTWdUV2pqd2JJZWZIRU8zUmVBMzZWZWV3bXoKOFJHVUtBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n name: k0s\ncontexts:\n- context:\n cluster: k0s\n user: raspi-cluster-master\n name: k0s\ncurrent-context: k0s\nkind: Config\npreferences: {}\nusers:\n- name: raspi-cluster-master\n user:\n client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYVENDQWtXZ0F3SUJBZ0lVV0ZZNkZ4cCtUYnhxQUxTVjM0REVMb0dEc3Q0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0dERVdNQlFHQTFVRUF4TU5hM1ZpWlhKdVpYUmxjeTFqWVRBZUZ3MHlNakE0TVRneE5ERTRNREJhRncweQpNekE0TVRneE5ERTRNREJhTURneEZ6QVZCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVIwd0d3WURWUVFECkV4UnlZWE53YVMxamJIVnpkR1Z5TFcxaGMzUmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTGJNalI5eHA1dDJzank1S0dEQnQ2dWl3QU4vaEhwZkFUNXJrZTFRblc2eFlZeDYzR2JBTXYrRQpjWmEyUEdPempQeVVTZThVdWp4ZnR0L1JWSTJRVkVIRGlJZ1ZDNk1tUUFmTm1WVlpKOHBFaTM2dGJZYUVxN3dxClhxYmJBQ0F0ZGtwNTJ0Y0RLVU9sRS9SV0tUSjN4bXUvRmh0OTIrRDdtM1RrZTE0TkJ5a1hvakk1a2xVWU9ySEMKVTN3V210eXlIUFpDMFBPdWpXSE5yeS9wOXFjZzRreWNDN0NzUVZqMWoxY2JwdXRpWllvRHNHV3piS0RTbExRZApyYnUwRnRVZVpUQzVPN2NuTk5tMU1EZldubXhlekw4L2N5dkJCYnRmMjhmcERFeEhMT2dTY2ZZUlZwUllPMzdvCk5yUjljMGNaZE9oZW5YVnlQcU1WVVlSNkQxMlRrY0VDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCUitqQTlGNm1jc25ob2NtMnd0dFNYY2tCaUpoakFmQmdOVkhTTUVHREFXZ0JTd2p4STJFTFU0CitNZUtwT0JNQUNnZDdKU1QxVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBY2RRV3N4OUpHOUIxckxVc2Y1QzgKd1BzTkhkZURYeG1idm4zbXN3aFdVMEZHU1pjWjlkMTYzeXhEWnA4QlNzNWFjNnZqcU1lWlFyRThDUXdXYTlxVAowZVJXcTlFODYzcS9VcFVNN3lPM1BnMHd4RWtQSTVuSjRkM0o3MHA3Zk4zenpzMUJzU0h6Q2hzOWR4dE5XaVp5CnNINzdhbG9NanA0cXBEVWRyVWcyT0d4RWhRdzJIaXE3ZEprQm80a3hoWmhBc3lWTDdZRng0SDY3WkIzSjY4V3QKdTdiWnRmUVJZV3ZPUE9oS0pFdmlLVXptNDJBUlZXTDdhZHVESTBBNmpxbXhkTGNxKzlNWVlaNm1CT0NWakx1WgoybDlJSVI2NkdjOUdpdC9kSFdwbTVZbmozeW8xcUU0UVg4ZmVUQTczUlU5cmFIdkNpTGdVbFRaVUNGa3JNL0NtCndBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHN5TkgzR25tM2F5UExrb1lNRzNxNkxBQTMrRWVsOEJQbXVSN1ZDZGJyRmhqSHJjClpzQXkvNFJ4bHJZOFk3T00vSlJKN3hTNlBGKzIzOUZValpCVVFjT0lpQlVMb3laQUI4MlpWVmtueWtTTGZxMXQKaG9TcnZDcGVwdHNBSUMxMlNubmExd01wUTZVVDlGWXBNbmZHYTc4V0czM2I0UHViZE9SN1hnMEhLUmVpTWptUwpWUmc2c2NKVGZCYWEzTEljOWtMUTg2Nk5ZYzJ2TCtuMnB5RGlUSndMc0t4QldQV1BWeHVtNjJKbGlnT3daYk5zCm9OS1V0QjJ0dTdRVzFSNWxNTGs3dHljMDJiVXdOOWFlYkY3TXZ6OXpLOEVGdTEvYngra01URWNzNkJKeDloRlcKbEZnN2Z1ZzJ0SDF6UnhsMDZGNmRkWEkrb3hWUmhIb1BYWk9Sd1FJREFRQUJBb0lCQUFpYytzbFFnYVZCb29SWgo5UjBhQTUyQ3ZhbHNpTUY3V0lPb2JlZlF0SnBTb1ZZTk0vVmplUU94S2VrQURUaGxiVzg1VFlLR1o0QVF3bjBwClQrS2J1bHllNmYvL2ZkemlJSUk5bmN2M3QzaEFZcEpGZWJPczdLcWhGSFNvUFFsSEd4dkhRaGgvZmFKQ1ZQNWUKVVBLZjBpbWhoMWtrUlFnRTB2NWZCYkVZekEyVGl4bThJSGtQUkdmZWN4WmF1VHpBS2VLR0hjTFpDem8xRHhlSgp3bHpEUW9YWDdHQnY5MGxqR1pndENXcFEyRUxaZ1NwdW0rZ0crekg1WFNXZXgwMzJ4d0NhbkdDdGcyRmxHd2V2Ck9PaG8zSjNrRWVJR1MzSzFJY24rcU9hMjRGZmgvcmRsWXFSdStWeEZ4ZkZqWGxaUjdjZkF4Mnc1Z3NmWm9CRXIKUE1oMTdVRUNnWUVBejZiTDc4RWsvZU1jczF6aWdaVVpZcE5qa2FuWHlsS3NUUWM1dU1pRmNORFdObFkxdlQzVQprOHE5cHVLbnBZRVlTTGVVTS9tSWk5TVp6bmZjSmJSL0hJSG9YVjFMQVJ2blQ0djN3T0JsaDc5ajdKUjBpOW1OClYrR0Q1SlNPUmZCVmYxVlJHRXN6d3ZhOVJsS2lMZ0JVM2tKeWN2Q09jYm5aeFltSXRrbDhDbXNDZ1lFQTRWeG4KZTY2QURIYmR3T0plbEFSKytkVHh5eVYyRjY1SEZDNldPQVh2RVRucGRudnRRUUprWWhNYzM1Y2gvMldmZDBWYQpZb3lGZE9kRThKZSsvcWxuS1pBc3BHRC9yZHp2VmFteHQ4WXdrQXU5Q1diZWw2VENPYkZOQ2hjK1NUbmRqN0duCmlSUHprM1JYMnBEVi9OaW5FVFA0TEJnTHJQYkxlSVAwSzZ4bjk0TUNnWUVBeXZGMmNVendUVjRRNTgrSTVDS0gKVzhzMnpkOFRzbjVZUFRRcG1zb0hlTG55RWNyeDNKRTRXSFVXSTZ0ek01TFczQUxuU21DL3JnQlVRWER0Yk1CYQpWczh6L1VPM2tVN25JOXhrK0ZHWGlUTnBnb2VZM0RGMExZYVBNL0JvbUR3S0kxZUwyVlZ1TWthWnQ4ZjlEejV0CnM0ZDNlWlJYY3hpem1KY1JVUzdDbHg4Q2dZQk45Vmc2K2RlRCtFNm4zZWNYenlKWnJHZGtmZllISlJ1amlLWWcKaFRUNVFZNVlsWEF5Yi9CbjJQTEJDaGdSc0lia2pKSkN5eGVUcERrOS9WQnQ2ZzRzMjVvRjF5UTdjZFU5VGZHVApnRFRtYjVrYU9vSy85SmZYdTFUS0s5WTV
JSkpibGZvOXVqQWxqemFnL2o5NE16NC8vamxZajR6aWJaRmZoRTRnCkdZanhud0tCZ0U1cFIwMlVCa1hYL3IvdjRqck52enNDSjR5V3U2aWtpem00UmJKUXJVdEVNd1Y3a2JjNEs0VFIKM2s1blo1M1J4OGhjYTlMbXREcDJIRWo2MlBpL2pMR0JTN0NhOCtQcStxNjZwWWFZTDAwWnc4UGI3OVMrUmpzQQpONkNuQWg1dDFYeDhVMTIvWm9JcjBpOWZDaERuNlBqVEM0MVh5M1EwWWd6TW5jYXMyNVBiCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n
Using the above kubeconfig, you can now access and use the cluster:
ubuntu@ubuntu:~$ KUBECONFIG=/path/to/kubeconfig kubectl get nodes,deployments,pods -owide -A\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nnode/ubuntu Ready <none> 5m1s v1.28.1+k0s 10.152.56.54 <none> Ubuntu 22.04.1 LTS 5.15.0-1013-raspi containerd://1.7.2\n\nNAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR\nkube-system deployment.apps/coredns 1/1 1 1 33m coredns registry.k8s.io/coredns/coredns:v1.7.0 k8s-app=kube-dns\nkube-system deployment.apps/metrics-server 1/1 1 1 33m metrics-server registry.k8s.io/metrics-server/metrics-server:v0.6.4 k8s-app=metrics-server\n\nNAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES\nkube-system pod/coredns-88b745646-pkk5w 1/1 Running 0 33m 10.244.0.5 ubuntu <none> <none>\nkube-system pod/konnectivity-agent-h4nfj 1/1 Running 0 5m1s 10.244.0.6 ubuntu <none> <none>\nkube-system pod/kube-proxy-qcgzs 1/1 Running 0 5m1s 10.152.56.54 ubuntu <none> <none>\nkube-system pod/kube-router-6lrht 1/1 Running 0 5m1s 10.152.56.54 ubuntu <none> <none>\nkube-system pod/metrics-server-7d7c4887f4-wwbkk 1/1 Running 0 33m 10.244.0.4 ubuntu <none> <none>\n
"},{"location":"reinstall-k0sctl/","title":"Reinstall a node","text":"k0sctl
currently does not support changing all the configuration of containerd (state
, root
) on the fly.
For example, in order to move containerd's root
directory to a new partition/drive, you have to provide --data-dir /new/drive
in your k0sctl installFlags
for each (worker) node. --data-dir
is an option of k0s
and is then added to the service unit.
The following is an example of that:
# spec.hosts[*].installFlags\n- role: worker\ninstallFlags:\n- --profile flatcar\n- --enable-cloud-provider\n- --data-dir /new/drive\n- --kubelet-extra-args=\"--cloud-provider=external\"\n
However, the installFlags
are only used when the node is installed.
Drain the node:
kubectl drain node.hostname\n
Access your node (e.g. via ssh) to stop and reset k0s:
sudo k0s stop\nsudo k0s reset\n
Reboot the node (for good measure):
sudo systemctl reboot\n
Once the node is available again, run k0sctl apply
to integrate it into your cluster and uncordon the node to allow pods to be scheduled:
k0sctl apply -c config.yaml\nkubectl uncordon node.hostname\n
"},{"location":"releases/","title":"Releases","text":"This page describes how we release and support the k0s project. Mirantis Inc. can also provide commercial support for k0s.
"},{"location":"releases/#upstream-kubernetes-release-support-cycle","title":"Upstream Kubernetes release & support cycle","text":"This release and support cycle is followed for ALL new minor releases. A minor release can be e.g. 1.25, 1.26 and so on. What this means in practice is that every 4 months there is a new minor release published.
After a minor release is published, the upstream community is maintaining it for 14 months. Maintenance in this case means that upstream Kubernetes provides bug fixes, CVE mitigations and such for 14 months per minor release.
"},{"location":"releases/#k0s-release-and-support-model","title":"k0s release and support model","text":"Starting from the k0s 1.21, k0s started following the Kubernetes project's release and support model.
k0s project follows closely the upstream Kubernetes release cycle. The only difference to upstream Kubernetes release / maintenance schedule is that our initial release date is always a few weeks behind the upstream Kubernetes version release date as we are building our version of k0s from the officially released version of Kubernetes and need time for testing the final version before shipping.
Given the fact that upstream Kubernetes provides support and patch releases for a minor version for roughly 14 months, it means that k0s will follow this same model. Each minor release is maintained for roughly 14 months since its initial release.
k0s project will typically include patches and fixes included in a Kubernetes upstream patch release for the fixes needed in k0s own codebase. For example, if a bug is identified in 1.26 series k0s project will create and ship a fix for it with the next upstream Kubernetes 1.26.x release. In rare cases where a critical bug is identified we may also ship \u201cout of band\u201d patches. Such out-of-band release would be identified in the version string suffix. For example a normal release following Kubernetes upstream would be 1.26.3+k0s.0 whereas a critical out-of-band patch would be identified as 1.26.3+k0s.1.
"},{"location":"releases/#new-features-and-enhancements","title":"New features and enhancements","text":"The biggest new k0s features will typically only be delivered on top of the latest Kubernetes version, but smaller enhancements can be included in older release tracks as well.
"},{"location":"releases/#version-string","title":"Version string","text":"The k0s version string consists of the Kubernetes version and the k0s version. For example:
The Kubernetes version (1.28.1) is the first part, and the last part (k0s.0) reflects the k0s version, which is built on top of the certain Kubernetes version.
"},{"location":"remove_controller/","title":"Remove or replace a controller","text":"You can manually remove or replace a controller from a multi-node k0s cluster (>=3 controllers) without downtime. However, you have to maintain quorum on Etcd while doing so.
"},{"location":"remove_controller/#remove-a-controller","title":"Remove a controller","text":"If your controller is also a worker (k0s controller --enable-worker
), you first have to delete the controller from Kubernetes itself. To do so, run the following commands from the controller:
# Remove the containers from the node and cordon it\nk0s kubectl drain --ignore-daemonsets --delete-emptydir-data <controller>\n# Delete the node from the cluster\nk0s kubectl delete node <controller>\n
Then you need to remove it from the Etcd cluster. For example, if you want to remove controller01
from a cluster with 3 controllers:
# First, list the Etcd members\nk0s etcd member-list\n{\"members\":{\"controller01\":\"<PEER_ADDRESS1>\", \"controller02\": \"<PEER_ADDRESS2>\", \"controller03\": \"<PEER_ADDRESS3>\"}}\n# Then, remove the controller01 using its peer address\nk0s etcd leave --peer-address \"<PEER_ADDRESS1>\"\n
The controller is now removed from the cluster. To reset k0s on the machine, run the following commands:
k0s stop\nk0s reset\nreboot\n
"},{"location":"remove_controller/#replace-a-controller","title":"Replace a controller","text":"To replace a controller, you first remove the old controller (like described above) then follow the manual installation procedure to add the new one.
"},{"location":"reset/","title":"Uninstall/Reset","text":"k0s can be uninstalled locally with k0s reset
command and remotely with k0sctl reset
command. They remove all k0s-related files from the host.
reset
operates under the assumption that k0s is installed as a service on the host.
To prevent accidental triggering, k0s reset
will not run if the k0s service is running, so you must first stop the service:
Stop the service:
sudo k0s stop\n
Invoke the reset
command:
$ sudo k0s reset\nINFO[2021-06-29 13:08:39] * containers steps\nINFO[2021-06-29 13:08:44] successfully removed k0s containers!\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * remove k0s users step:\nINFO[2021-06-29 13:08:44] no config file given, using defaults\nINFO[2021-06-29 13:08:44] * uninstall service step\nINFO[2021-06-29 13:08:44] Uninstalling the k0s service\nINFO[2021-06-29 13:08:45] * remove directories step\nINFO[2021-06-29 13:08:45] * CNI leftovers cleanup step\nINFO k0s cleanup operations done. To ensure a full reset, a node reboot is recommended.\n
k0sctl can be used to connect each node and remove all k0s-related files and processes from the hosts.
k0sctl reset
command:$ k0sctl reset --config k0sctl.yaml\nk0sctl v0.9.0 Copyright 2021, k0sctl authors.\n\n? Going to reset all of the hosts, which will destroy all configuration and data, Are you sure? Yes\nINFO ==> Running phase: Connect to hosts \nINFO [ssh] 13.53.43.63:22: connected \nINFO [ssh] 13.53.218.149:22: connected INFO ==> Running phase: Detect host operating systems \nINFO [ssh] 13.53.43.63:22: is running Ubuntu 20.04.2 LTS \nINFO [ssh] 13.53.218.149:22: is running Ubuntu 20.04.2 LTS INFO ==> Running phase: Prepare hosts INFO ==> Running phase: Gather k0s facts \nINFO [ssh] 13.53.43.63:22: found existing configuration \nINFO [ssh] 13.53.43.63:22: is running k0s controller version 1.28.1+k0s.0\nINFO [ssh] 13.53.218.149:22: is running k0s worker version 1.28.1+k0s.0\nINFO [ssh] 13.53.43.63:22: checking if worker has joined INFO ==> Running phase: Reset hosts \nINFO [ssh] 13.53.43.63:22: stopping k0s \nINFO [ssh] 13.53.218.149:22: stopping k0s \nINFO [ssh] 13.53.218.149:22: running k0s reset \nINFO [ssh] 13.53.43.63:22: running k0s reset INFO ==> Running phase: Disconnect from hosts INFO ==> Finished in 8s
k0s uses containerd as the default Container Runtime Interface (CRI) and runc as the default low-level runtime. In most cases they don't require any configuration changes. However, if custom configuration is needed, this page provides some examples.
"},{"location":"runtime/#containerd-configuration","title":"containerd configuration","text":"By default k0s manages the full containerd configuration. User has the option of fully overriding, and thus also managing, the configuration themselves.
"},{"location":"runtime/#user-managed-containerd-configuration","title":"User managed containerd configuration","text":"In the default k0s generated configuration there's a \"magic\" comment telling k0s it is k0s managed:
# k0s_managed=true\n
If you wish to take over the configuration management remove this line.
To make changes to containerd configuration you must first generate a default containerd configuration, with the default values set to /etc/k0s/containerd.toml
:
containerd config default > /etc/k0s/containerd.toml\n
k0s
runs containerd with the following default values:
/var/lib/k0s/bin/containerd \\\n--root=/var/lib/k0s/containerd \\\n--state=/run/k0s/containerd \\\n--address=/run/k0s/containerd.sock \\\n--config=/etc/k0s/containerd.toml\n
Next, add the following default values to the configuration file:
version = 2\nroot = \"/var/lib/k0s/containerd\"\nstate = \"/run/k0s/containerd\"\n...\n\n[grpc]\naddress = \"/run/k0s/containerd.sock\"\n
"},{"location":"runtime/#k0s-managed-dynamic-runtime-configuration","title":"k0s managed dynamic runtime configuration","text":"From 1.27.1 onwards k0s enables dynamic configuration on containerd CRI runtimes. This works by k0s creating a special directory in /etc/k0s/containerd.d/
where user can drop-in partial containerd configuration snippets.
k0s will automatically pick up these files and adds these in containerd configuration imports
list. If k0s sees the configuration drop-ins are CRI related configurations k0s will automatically collect all these into a single file and adds that as a single import file. This is to overcome some hard limitation on containerd 1.X versions. Read more at containerd#8056
Following chapters provide some examples how to configure different runtimes for containerd using k0s managed drop-in configurations.
"},{"location":"runtime/#using-gvisor","title":"Using gVisor","text":"gVisor is an application kernel, written in Go, that implements a substantial portion of the Linux system call interface. It provides an additional layer of isolation between running applications and the host operating system.
Install the needed gVisor binaries into the host.
(\nset -e\n ARCH=$(uname -m)\nURL=https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}\nwget ${URL}/runsc ${URL}/runsc.sha512 \\\n${URL}/containerd-shim-runsc-v1 ${URL}/containerd-shim-runsc-v1.sha512\n sha512sum -c runsc.sha512 \\\n-c containerd-shim-runsc-v1.sha512\n rm -f *.sha512\n chmod a+rx runsc containerd-shim-runsc-v1\n sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin\n)\n
Refer to the gVisor install docs for more information.
Prepare the config for k0s
managed containerD, to utilize gVisor as additional runtime:
cat <<EOF | sudo tee /etc/k0s/containerd.d/gvisor.toml\nversion = 2\n\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runsc]\n runtime_type = \"io.containerd.runsc.v1\"\nEOF\n
Start and join the worker into the cluster, as normal:
k0s worker $token\n
Register containerd to the Kubernetes side to make gVisor runtime usable for workloads (by default, containerd uses normal runc as the runtime):
cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n name: gvisor\nhandler: runsc\nEOF\n
At this point, you can use gVisor runtime for your workloads:
apiVersion: v1\nkind: Pod\nmetadata:\nname: nginx-gvisor\nspec:\nruntimeClassName: gvisor\ncontainers:\n- name: nginx\nimage: nginx\n
(Optional) Verify that the created nginx pod is running under gVisor runtime:
# kubectl exec nginx-gvisor -- dmesg | grep -i gvisor\n[ 0.000000] Starting gVisor...\n
nvidia-container-runtime
","text":"First, install the NVIDIA runtime components:
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \\\n&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list\nsudo apt-get update && sudo apt-get install -y nvidia-container-runtime\n
Next, drop in the containerd runtime configuration snippet into /etc/k0s/containerd.d/nvidia.toml
[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia]\nprivileged_without_host_devices = false\nruntime_engine = \"\"\nruntime_root = \"\"\nruntime_type = \"io.containerd.runc.v1\"\n[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.nvidia.options]\nBinaryName = \"/usr/bin/nvidia-container-runtime\"\n
Create the needed RuntimeClass
:
cat <<EOF | kubectl apply -f -\napiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n name: nvidia\nhandler: nvidia\nEOF\n
Note Detailed instruction on how to run nvidia-container-runtime
on your node is available here.
Warning: You can use your own CRI runtime with k0s (for example, docker
). However, k0s will not start or manage the runtime, and configuration is solely your responsibility.
Use the option --cri-socket
to run a k0s worker with a custom CRI runtime. the option takes input in the form of <type>:<socket_path>
(for type
, use docker
for a pure Docker setup and remote
for anything else).
To run k0s with a pre-existing Dockershim setup, run the worker with k0s worker --cri-socket docker:unix:///var/run/cri-dockerd.sock <token>
. A detailed explanation on dockershim and a guide for installing cri-dockerd can be found in our k0s dockershim guide.
SELinux enforces mandatory access control policies that confine user programs and system services, as well as access to files and network resources. Limiting privilege to the minimum required to work reduces or eliminates the ability of these programs and daemons to cause harm if faulty or compromised.
Enabling SELinux in container runtime provides an additional security control to help further enforce isolation among deployed containers and the host.
This guide describes how to enable SELinux in Kubernetes environment provided by k0s on CentOS and Red Hat Enterprise Linux (RHEL).
"},{"location":"selinux/#requirements","title":"Requirements","text":"SELinux is enabled on CentOS and RHEL by default. Below command output indicates SELinux is enabled.
$ getenforce\nEnforcing\n
"},{"location":"selinux/#install-container-selinux","title":"Install container-selinux","text":"It is required to have container-selinux installed. In most Fedora based distributions including Fedora 37, Red Hat Enterprise Linux 7, 8 and 8, CentOS 7 and 8 and Rocky Linux 9 this can be achieved by installing the package container-selinux.
In RHEL 7 and CentOS 7 this is achieved by running:
yum install -y container-selinux\n
In the rest of the metnioned distributions run:
dnf install -y container-selinux\n
"},{"location":"selinux/#set-selinux-labels-for-k0s-installation-files","title":"Set SELinux labels for k0s installation files","text":"Run below commands on the host OS of the worker nodes.
DATA_DIR=\"/var/lib/k0s\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/containerd.*\"\nsudo semanage fcontext -a -t container_runtime_exec_t \"${DATA_DIR}/bin/runc\"\nsudo restorecon -R -v ${DATA_DIR}/bin\nsudo semanage fcontext -a -t container_var_lib_t \"${DATA_DIR}/containerd(/.*)?\"\nsudo semanage fcontext -a -t container_ro_file_t \"${DATA_DIR}/containerd/io.containerd.snapshotter.*/snapshots(/.*)?\"\nsudo restorecon -R -v ${DATA_DIR}/containerd\n
"},{"location":"selinux/#enable-selinux-in-containerd-of-k0s","title":"Enable SELinux in containerd of k0s","text":"Add below lines to /etc/k0s/containerd.toml
of the worker nodes. You need to restart k0s service on the node to make the change take effect.
[plugins.\"io.containerd.grpc.v1.cri\"]\nenable_selinux = true\n
"},{"location":"selinux/#verify-selinux-works-in-kubernetes-environment","title":"Verify SELinux works in Kubernetes environment","text":"By following the example Assign SELinux labels to a Container, deploy a testing pod using below YAML file:
apiVersion: v1\nkind: Pod\nmetadata:\nname: test-selinux\nspec:\ncontainers:\n- image: busybox\nname: test-selinux\ncommand: [\"sleep\", \"infinity\"]\nsecurityContext:\nseLinuxOptions:\nlevel: \"s0:c123,c456\"\n
After the pod starts, ssh to the worker node on which the pod is running and check the pod process. It should display the label s0:c123,c456
that you specified in the YAML file:
$ ps -efZ | grep -F 'sleep infinity'\nsystem_u:system_r:container_t:s0:c123,c456 root 3346 3288 0 16:39 ? 00:00:00 sleep infinity\n
"},{"location":"shell-completion/","title":"Enabling Shell Completion","text":"Generate the k0s completion script using the k0s completion <shell_name>
command, for Bash, Zsh, fish, or PowerShell.
Sourcing the completion script in your shell enables k0s autocompletion.
"},{"location":"shell-completion/#bash","title":"Bash","text":"echo 'source <(k0s completion bash)' >>~/.bashrc\n
To load completions for each session, execute once:
k0s completion bash > /etc/bash_completion.d/k0s\n
"},{"location":"shell-completion/#zsh","title":"Zsh","text":"If shell completion is not already enabled in Zsh environment you will need to enable it:
echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n
To load completions for each session, execute once:
k0s completion zsh > \"${fpath[1]}/_k0s\"\n
Note: You must start a new shell for the setup to take effect.
"},{"location":"shell-completion/#fish","title":"Fish","text":"k0s completion fish | source\n
To load completions for each session, execute once:
k0s completion fish > ~/.config/fish/completions/k0s.fish\n
"},{"location":"storage/","title":"Storage","text":""},{"location":"storage/#bundled-openebs-storage","title":"Bundled OpenEBS storage","text":"K0s comes out with bundled OpenEBS installation which can be enabled by using configuration file
Use following configuration as an example:
spec:\nextensions:\nstorage:\ntype: openebs_local_storage\n
The cluster will have two storage classes available for you to use:
k0s kubectl get storageclass\n
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE\nopenebs-device openebs.io/local Delete WaitForFirstConsumer false 24s\nopenebs-hostpath openebs.io/local Delete WaitForFirstConsumer false 24s\n
The openebs-hostpath
is the storage class that maps to the /var/openebs/local
The openebs-device
is not configured and could be configured by manifest deployer accordingly to the OpenEBS documentation
Use following manifests as an example of pod with mounted volume:
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: nginx-pvc\nnamespace: default\nspec:\naccessModes:\n- ReadWriteOnce\nstorageClassName: openebs-hostpath\nresources:\nrequests:\nstorage: 5Gi\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx\nnamespace: default\nlabels:\napp: nginx\nspec:\nselector:\nmatchLabels:\napp: nginx\nstrategy:\ntype: Recreate\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- image: nginx name: nginx\nvolumeMounts:\n- name: persistent-storage\nmountPath: /var/lib/nginx\nvolumes:\n- name: persistent-storage\npersistentVolumeClaim:\nclaimName: nginx-pvc\n
k0s kubectl apply -f nginx.yaml\n
persistentvolumeclaim/nginx-pvc created\ndeployment.apps/nginx created\nbash-5.1# k0s kc get pods\nNAME READY STATUS RESTARTS AGE\nnginx-d95bcb7db-gzsdt 1/1 Running 0 30s\n
k0s kubectl get pv\n
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE\npvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f 5Gi RWO Delete Bound default/nginx-pvc openebs-hostpath 30s\n
"},{"location":"storage/#csi","title":"CSI","text":"k0s supports a wide range of different storage options by utilizing Container Storage Interface (CSI). All Kubernetes storage solutions are supported and users can easily select the storage that fits best for their needs.
When the storage solution implements Container Storage Interface (CSI), containers can communicate with the storage for creation and configuration of persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the supported storage solutions from the previous generation, in-tree volume plugins. More information about the CSI concept is described on the Kubernetes Blog.
"},{"location":"storage/#installing-3rd-party-storage-solutions","title":"Installing 3rd party storage solutions","text":"Follow your storage driver's installation instructions. Note that the Kubelet installed by k0s uses a slightly different path for its working directory (/varlib/k0s/kubelet
instead of /var/lib/kubelet
). Consult the CSI driver's configuration documentation on how to customize this path.
Different Kubernetes storage solutions are explained in the official Kubernetes storage documentation. All of them can be used with k0s. Here are some popular ones:
If you are looking for a fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook in here.
"},{"location":"system-monitoring/","title":"System components monitoring","text":"Controller nodes are isolated by default, which thus means that a cluster user cannot schedule workloads onto controller nodes.
k0s provides a mechanism to expose system components for monitoring. System component metrics can give a better look into what is happening inside them. Metrics are particularly useful for building dashboards and alerts. You can read more about metrics for Kubernetes system components here.
Note: the mechanism is an opt-in feature, you can enable it on installation:
sudo k0s install controller --enable-metrics-scraper\n
"},{"location":"system-monitoring/#jobs","title":"Jobs","text":"The list of components which is scrapped by k0s:
Note: kube-apiserver metrics are not scrapped since they are accessible via kubernetes
endpoint within the cluster.
k0s uses pushgateway with TTL to make it possible to detect issues with the metrics delivery. Default TTL is 2 minutes.
"},{"location":"system-requirements/","title":"System requirements","text":"This page describes the system requirements for k0s.
"},{"location":"system-requirements/#minimum-memory-and-cpu-requirements","title":"Minimum memory and CPU requirements","text":"The minimum requirements for k0s detailed below are approximations, and thus your results may vary.
Role Memory (RAM) Virtual CPU (vCPU) Controller node 1 GB 1 vCPU Worker node 0.5 GB 1 vCPU Controller + worker 1 GB 1 vCPU"},{"location":"system-requirements/#controller-node-recommendations","title":"Controller node recommendations","text":"# of Worker nodes # of Pods Recommended RAM Recommended vCPU up to 10 up to 1000 1-2 GB 1-2 vCPU up to 50 up to 5000 2-4 GB 2-4 vCPU up to 100 up to 10000 4-8 GB 2-4 vCPU up to 500 up to 50000 8-16 GB 4-8 vCPU up to 1000 up to 100000 16-32 GB 8-16 vCPU up to 5000 up to 150000 32-64 GB 16-32 vCPUk0s has the standard Kubernetes limits for the maximum number of nodes, pods, etc. For more details, see the Kubernetes considerations for large clusters.
k0s controller node measured memory consumption can be found below on this page.
"},{"location":"system-requirements/#storage","title":"Storage","text":"It's recommended to use an SSD for optimal storage performance (cluster latency and throughput are sensitive to storage).
The specific storage consumption for k0s is as follows:
Role Storage (k0s part) Controller node ~0.5 GB Worker node ~1.3 GB Controller + worker ~1.7 GBNote: The operating system and application requirements must be considered in addition to the k0s part.
"},{"location":"system-requirements/#host-operating-system","title":"Host operating system","text":"For information on the required ports and protocols, refer to networking.
"},{"location":"system-requirements/#external-runtime-dependencies","title":"External runtime dependencies","text":"k0s strives to be as independent from the OS as possible. The current and past external runtime dependencies are documented here.
To run some automated compatiblility checks on your system, use k0s sysinfo
.
The following table shows the measured memory consumption in the cluster of one controller node.
# of Worker nodes # of Pods (besides default) Memory consumption 1 0 510 MB 1 100 600 MB 20 0 660 MB 20 2000 1000 MB 50 0 790 MB 50 5000 1400 MB 100 0 1000 MB 100 10000 2300 MB 200 0 1500 MB 200 20000 3300 MBMeasurement details:
There are few common cases we've seen where k0s fails to run properly.
"},{"location":"troubleshooting/#coredns-in-crashloop","title":"CoreDNS in crashloop","text":"The most common case we've encountered so far has been CoreDNS getting into crashloop on the node(s).
With kubectl you see something like this:
$ kubectl get pod --all-namespaces\nNAMESPACE NAME READY STATUS RESTARTS AGE\nkube-system calico-kube-controllers-5f6546844f-25px6 1/1 Running 0 167m\nkube-system calico-node-fwjx5 1/1 Running 0 164m\nkube-system calico-node-t4tx5 1/1 Running 0 164m\nkube-system calico-node-whwsg 1/1 Running 0 164m\nkube-system coredns-5c98d7d4d8-tfs4q 1/1 Error 17 167m\nkube-system konnectivity-agent-9jkfd 1/1 Running 0 164m\nkube-system konnectivity-agent-bvhdb 1/1 Running 0 164m\nkube-system konnectivity-agent-r6mzj 1/1 Running 0 164m\nkube-system kube-proxy-kr2r9 1/1 Running 0 164m\nkube-system kube-proxy-tbljr 1/1 Running 0 164m\nkube-system kube-proxy-xbw7p 1/1 Running 0 164m\nkube-system metrics-server-7d4bcb75dd-pqkrs 1/1 Running 0 167m\n
When you check the logs, it'll show something like this:
kubectl -n kube-system logs coredns-5c98d7d4d8-tfs4q\n
plugin/loop: Loop (127.0.0.1:55953 -> :1053) detected for zone \".\", see https://coredns.io/plugins/loop#troubleshooting. Query: \"HINFO 4547991504243258144.3688648895315093531.\"\n
This is most often caused by the systemd-resolved stub (or something similar) running locally, which makes CoreDNS detect a possible loop with DNS queries.
The easiest, albeit most crude, way to work around this is to disable the systemd-resolved stub and revert the host's /etc/resolv.conf to its original state.
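As a rough sketch of such a workaround on a systemd-based host (the exact steps depend on your distribution and are an illustration, not part of the official k0s instructions):
# Disable the local DNS stub listener of systemd-resolved\nsudo sed -i 's/^#\?DNSStubListener=.*/DNSStubListener=no/' /etc/systemd/resolved.conf\nsudo systemctl restart systemd-resolved\n# Point /etc/resolv.conf at the real upstream resolvers instead of the 127.0.0.53 stub\nsudo ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf\n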
Read more at CoreDNS troubleshooting docs.
"},{"location":"troubleshooting/#k0s-controller-fails-on-arm-boxes","title":"k0s controller
fails on ARM boxes","text":"In the logs you probably see etcd not starting up properly.
Etcd is not fully supported on the ARM architecture, so you need to run the k0s controller, and thus also the etcd process, with the environment variable ETCD_UNSUPPORTED_ARCH=arm set.
As etcd is not fully supported on ARM, it also means that the k0s control plane with etcd itself is not fully supported on ARM either.
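A minimal sketch of how the environment variable could be passed when installing the controller as a service, using the -e/--env flag documented in the k0s install CLI reference below (shown for illustration only):
sudo k0s install controller -e ETCD_UNSUPPORTED_ARCH=arm\nsudo k0s start\n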
"},{"location":"troubleshooting/#k0s-will-not-start-on-zfs-based-systems","title":"k0s
will not start on ZFS-based systems","text":"On ZFS-based systems k0s will fail to start because containerd runs by default in overlayfs mode to manage image layers. This is not compatible with ZFS and requires a custom config of containerd. The following steps should get k0s working on ZFS-based systems:
Check with $ ctr -a /run/k0s/containerd.sock plugins ls that the containerd ZFS snapshotter plugin is in ok state (should be the case if ZFS kernel modules and ZFS userspace utils are correctly configured): TYPE ID PLATFORMS STATUS \n...\nio.containerd.snapshotter.v1 zfs linux/amd64 ok\n...\n
Create a default containerd configuration file: $ containerd config default > /etc/k0s/containerd.toml
In /etc/k0s/containerd.toml, change the snapshotter setting from ...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"overlayfs\"\n...\n
to
...\n[plugins.\"io.containerd.grpc.v1.cri\".containerd]\nsnapshotter = \"zfs\"\n...\n
Create a ZFS dataset for the containerd snapshots: $ zfs create -o mountpoint=/var/lib/k0s/containerd/io.containerd.snapshotter.v1.zfs rpool/containerd
Install and start k0s as usual, for example: $ k0s install controller --single -c /etc/k0s/k0s.yaml
Once cloud provider support is enabled on the kubelet on worker nodes, the kubelet will automatically add the taint node.cloudprovider.kubernetes.io/uninitialized
to the node. This taint prevents normal workloads from being scheduled on the node until the cloud provider controller runs a second initialization on the node and removes the taint. This means that these nodes are not available for scheduling until the cloud provider controller is actually up and running on the cluster.
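To check whether a node still carries this taint, you can list node taints, for example with the same kubectl invocation that appears in the worker node configuration section below:
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n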
For troubleshooting your specific cloud provider see its documentation.
"},{"location":"troubleshooting/#k0s-not-working-with-read-only-usr","title":"k0s not working with read only/usr
","text":"By default k0s does not run on nodes where /usr
is read only.
This can be fixed by changing the default path for volumePluginDir
in your k0s config. You will need to change two values: one for the kubelet itself and one for Calico.
Here is a snippet of an example config with the default values changed:
spec:\ncontrollerManager:\nextraArgs:\nflex-volume-plugin-dir: \"/etc/kubernetes/kubelet-plugins/volume/exec\"\nnetwork:\ncalico:\nflexVolumeDriverPath: /etc/k0s/kubelet-plugins/volume/exec/nodeagent~uds\nworkerProfiles:\n- name: coreos\nvalues:\nvolumePluginDir: /etc/k0s/kubelet-plugins/volume/exec/\n
With this config you can start your controller as usual. Any workers will need to be started with
k0s worker --profile coreos [TOKEN]\n
"},{"location":"troubleshooting/#profiling","title":"Profiling","text":"We drop any debug related information and symbols from the compiled binary by utilzing -w -s
linker flags.
To keep those symbols use DEBUG
env variable:
DEBUG=true make k0s\n
Any value other than \"false\" will work.
To add custom linker flags, use the LD_FLAGS variable.
LD_FLAGS=\"--custom-flag=value\" make k0s\n
"},{"location":"troubleshooting/#im-using-custom-cri-and-missing-some-labels-in-prometheus-metrics","title":"I'm using custom CRI and missing some labels in Prometheus metrics","text":"Due to removal of the embedded dockershim from Kubelet, the Kubelet's embedded cAdvisor metrics got slightly broken. If your container runtime is a custom containerd you can add --kubelet-extra-flags=\"--containerd=<path/to/containerd.sock>\"
into k0s worker startup. That configures the Kubelet embedded cAdvisor to talk directly with containerd to gather the metrics and thus gets the expected labels in place.
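For example, assuming the custom containerd listens on /run/containerd/containerd.sock (the socket path is an assumption and depends on your setup), the worker could be started roughly like this:
k0s worker --token-file k0s.token --kubelet-extra-args=\"--containerd=/run/containerd/containerd.sock\"\n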
Unfortunately, this does not work when using Docker via the cri-dockerd shim. Currently, there is no easy solution to this problem.
In the future the kubelet will be refactored to get the container metrics from the CRI interface rather than from the runtime directly. This work is specified and followed up in KEP-2371, but until it completes the only option is to run a standalone cAdvisor. The known issues section of the official Kubernetes documentation about migrating away from dockershim explains the current shortcomings and shows how to run cAdvisor as a standalone DaemonSet.
"},{"location":"troubleshooting/#customized-configurations","title":"Customized configurations","text":"/var/lib/k0s
, for example:/var/lib/k0s/kubelet
/var/lib/k0s/etcd
Upgrading k0s is a simple process thanks to its single-binary distribution. The single binary includes all the necessary parts for the upgrade, so upgrading essentially amounts to replacing that file and restarting the service.
This tutorial explains two different approaches for k0s upgrade:
If your k0s cluster has been deployed with k0sctl, then k0sctl provides the easiest upgrade method. In that case jump to the next chapter. However, if you have deployed k0s without k0sctl, then follow the upgrade method explained in this chapter.
Before starting the upgrade, consider moving your applications to another node if you want to avoid downtime. This can be done by draining a worker node. Remember to uncordon the worker node afterwards to tell Kubernetes that it can resume scheduling new pods onto the node.
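A rough sketch of draining and later uncordoning a worker with kubectl (the node name is a placeholder for your own node):
kubectl drain worker0 --ignore-daemonsets --delete-emptydir-data\n# ... upgrade the node ...\nkubectl uncordon worker0\n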
The upgrade process is started by stopping the currently running k0s service.
sudo k0s stop\n
Now you can replace the old k0s binary file. The easiest way is to use the download script. It will download the latest k0s binary and replace the old binary with it. You can also do this manually without the download script.
curl -sSLf https://get.k0s.sh | sudo sh\n
Then you can start the service (with the upgraded k0s) and your upgrade is done.
sudo k0s start\n
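To confirm that the service is running the upgraded version, you can check the status and version afterwards (both commands are documented in the CLI reference later in this document):
sudo k0s status\nk0s version\n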
"},{"location":"upgrade/#upgrade-a-k0s-cluster-using-k0sctl","title":"Upgrade a k0s cluster using k0sctl","text":"The upgrading of k0s clusters using k0sctl occurs not through a particular command (there is no upgrade
sub-command in k0sctl) but by way of the configuration file. The configuration file describes the desired state of the cluster, and when you pass the description to the k0sctl apply
command, a discovery of the current state is performed and the system does whatever is necessary to bring the cluster to the desired state (for example, performing an upgrade).
The following operations occur during a k0sctl upgrade:
Upgrade of each controller, one at a time. There is no downtime if multiple controllers are configured.
Upgrade of workers, in batches of 10%.
Draining of workers, which allows the workload to move to other nodes prior to the actual upgrade of the worker node components. (To skip the drain process, use the --no-drain
option.)
The upgrade process continues once the upgraded nodes return to Ready state.
You can configure the desired cluster version in the k0sctl configuration by setting the value of spec.k0s.version
:
spec:\nk0s:\nversion: 1.28.1+k0s.0\n
If you do not specify a version, k0sctl checks online for the latest version and defaults to it.
k0sctl apply\n
...\n...\nINFO[0001] ==> Running phase: Upgrade controllers\nINFO[0001] [ssh] 10.0.0.23:22: starting upgrade\nINFO[0001] [ssh] 10.0.0.23:22: Running with legacy service name, migrating...\nINFO[0011] [ssh] 10.0.0.23:22: waiting for the k0s service to start\nINFO[0016] ==> Running phase: Upgrade workers\nINFO[0016] Upgrading 1 workers in parallel\nINFO[0016] [ssh] 10.0.0.17:22: upgrade starting\nINFO[0027] [ssh] 10.0.0.17:22: waiting for node to become ready again\nINFO[0027] [ssh] 10.0.0.17:22: upgrade successful\nINFO[0027] ==> Running phase: Disconnect from hosts\nINFO[0027] ==> Finished in 27s\nINFO[0027] k0s cluster version 1.28.1+k0s.0 is now installed\nINFO[0027] Tip: To access the cluster you can now fetch the admin kubeconfig using:\nINFO[0027] k0sctl kubeconfig\n
"},{"location":"user-management/","title":"User Management","text":""},{"location":"user-management/#adding-a-cluster-user","title":"Adding a Cluster User","text":"Run the kubeconfig create command on the controller to add a user to the cluster. The command outputs a kubeconfig for the user, to use for authentication.
k0s kubeconfig create [username]\n
"},{"location":"user-management/#enabling-access-to-cluster-resources","title":"Enabling Access to Cluster Resources","text":"Create the user with the system:masters
group to grant the user access to the cluster:
k0s kubeconfig create --groups \"system:masters\" testUser > k0s.config\n
Create a roleBinding
to grant the user access to the resources:
k0s kubectl create clusterrolebinding --kubeconfig k0s.config testUser-admin-binding --clusterrole=admin --user=testUser\n
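As a quick illustrative check (assuming the generated k0s.config file is available on the machine you run this from), you can verify that the new user can access the cluster:
KUBECONFIG=k0s.config kubectl get nodes\n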
"},{"location":"worker-node-config/","title":"Configuration options for worker nodes","text":"Although the k0s worker
command does not take in any special yaml configuration, there are still methods for configuring the workers to run various components.
The k0s worker
command accepts the --labels
flag, with which you can make the newly joined worker node register itself in the Kubernetes API with the given set of labels.
For example, running the worker with k0s worker --token-file k0s.token --labels=\"k0sproject.io/foo=bar,k0sproject.io/other=xyz\"
results in:
kubectl get node --show-labels\n
NAME STATUS ROLES AGE VERSION LABELS\nworker0 NotReady <none> 10s v1.28.1+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux\n
Controller worker nodes are assigned node.k0sproject.io/role=control-plane
and node-role.kubernetes.io/control-plane=true
labels:
kubectl get node --show-labels\n
NAME STATUS ROLES AGE VERSION LABELS\ncontroller0 NotReady control-plane 10s v1.28.1+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true\n
Note: Setting the labels is only effective on the first registration of the node. Changing the labels thereafter has no effect.
"},{"location":"worker-node-config/#taints","title":"Taints","text":"The k0s worker
command accepts the --taints
flag, with which you can make the newly joined worker node register itself with the given set of taints.
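For example, a worker could be started with a taint like the following. The key and value are purely illustrative; the key=value:effect format matches the --taints flag described in the CLI reference below.
k0s worker --token-file k0s.token --taints=\"dedicated=database:NoSchedule\"\n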
Note: Controller nodes running with --enable-worker
are assigned node-role.kubernetes.io/master:NoExecute
taint automatically. You can disable default taints using --no-taints
parameter.
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints\n
NAME TAINTS\ncontroller0 [map[effect:NoSchedule key:node-role.kubernetes.io/master]]\nworker0 <none>\n
"},{"location":"worker-node-config/#kubelet-configuration","title":"Kubelet configuration","text":"The k0s worker
command accepts a generic flag to pass in any set of arguments for kubelet process.
For example, running k0s worker --token-file=k0s.token --kubelet-extra-args=\"--node-ip=1.2.3.4 --address=0.0.0.0\"
passes in the given flags to Kubelet as-is. As such, you must confirm that any flags you are passing in are properly formatted and valued as k0s will not validate those flags.
Kubelet configuration fields can also be set via a worker profiles. Worker profiles are defined in the main k0s.yaml and are used to generate ConfigMaps containing a custom kubelet.config.k8s.io/v1beta1/KubeletConfiguration
object. To see examples of k0s.yaml containing worker profiles: go here. For a list of possible Kubelet configuration fields: go here.
k0s detects the iptables backend automatically based on the existing records. On a brand-new setup, iptables-nft
will be used. There is a --iptables-mode
flag to specify the mode explicitly. Valid values: nft
, legacy
and auto
(default).
k0s worker --iptables-mode=nft\n
"},{"location":"cli/","title":"Index","text":""},{"location":"cli/#k0s","title":"k0s","text":"k0s - Zero Friction Kubernetes
"},{"location":"cli/#synopsis","title":"Synopsis","text":"k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula
"},{"location":"cli/#options","title":"Options","text":" -h, --help help for k0s\n
"},{"location":"cli/#see-also","title":"SEE ALSO","text":"k0s - Zero Friction Kubernetes
"},{"location":"cli/k0s/#synopsis","title":"Synopsis","text":"k0s - The zero friction Kubernetes - https://k0sproject.io This software is built and distributed by Mirantis, Inc., and is subject to EULA https://k0sproject.io/licenses/eula
"},{"location":"cli/k0s/#options","title":"Options","text":" -h, --help help for k0s\n
"},{"location":"cli/k0s/#see-also","title":"SEE ALSO","text":"Manage airgap setup
"},{"location":"cli/k0s_airgap/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for airgap\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap/#see-also","title":"SEE ALSO","text":"List image names and version needed for air-gap install
k0s airgap list-images [flags]\n
"},{"location":"cli/k0s_airgap_list-images/#examples","title":"Examples","text":"k0s airgap list-images\n
"},{"location":"cli/k0s_airgap_list-images/#options","title":"Options","text":" --all include all images, even if they are not used in the current configuration\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for list-images\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_airgap_list-images/#see-also","title":"SEE ALSO","text":"Run the controller API
k0s api [flags]\n
"},{"location":"cli/k0s_api/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for api\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_api/#see-also","title":"SEE ALSO","text":"Back-Up k0s configuration. Must be run as root (or with sudo)
k0s backup [flags]\n
"},{"location":"cli/k0s_backup/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for backup\n --save-path string destination directory path for backup assets, use '-' for stdout\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_backup/#see-also","title":"SEE ALSO","text":"Generate completion script
"},{"location":"cli/k0s_completion/#synopsis","title":"Synopsis","text":"To load completions:
Bash:
$ source <(k0s completion bash)
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once","title":"To load completions for each session, execute once:","text":"$ k0s completion bash > /etc/bash_completion.d/k0s
Zsh:
"},{"location":"cli/k0s_completion/#if-shell-completion-is-not-already-enabled-in-your-environment-you-will-need","title":"If shell completion is not already enabled in your environment you will need","text":""},{"location":"cli/k0s_completion/#to-enable-it-you-can-execute-the-following-once","title":"to enable it. You can execute the following once:","text":"$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_1","title":"To load completions for each session, execute once:","text":"$ k0s completion zsh > \"${fpath[1]}/_k0s\"
"},{"location":"cli/k0s_completion/#you-will-need-to-start-a-new-shell-for-this-setup-to-take-effect","title":"You will need to start a new shell for this setup to take effect.","text":"Fish:
$ k0s completion fish | source
"},{"location":"cli/k0s_completion/#to-load-completions-for-each-session-execute-once_2","title":"To load completions for each session, execute once:","text":"$ k0s completion fish > ~/.config/fish/completions/k0s.fish
k0s completion <bash|zsh|fish|powershell>\n
"},{"location":"cli/k0s_completion/#options","title":"Options","text":" -h, --help help for completion\n
"},{"location":"cli/k0s_completion/#see-also","title":"SEE ALSO","text":"Configuration related sub-commands
"},{"location":"cli/k0s_config/#options","title":"Options","text":" -h, --help help for config\n
"},{"location":"cli/k0s_config/#see-also","title":"SEE ALSO","text":"Output the default k0s configuration yaml to stdout
k0s config create [flags]\n
"},{"location":"cli/k0s_config_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for create\n --include-images include the default images in the output\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_create/#see-also","title":"SEE ALSO","text":"Launch the editor configured in your shell to edit k0s configuration
k0s config edit [flags]\n
"},{"location":"cli/k0s_config_edit/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n -h, --help help for edit\n
"},{"location":"cli/k0s_config_edit/#see-also","title":"SEE ALSO","text":"Display dynamic configuration reconciliation status
k0s config status [flags]\n
"},{"location":"cli/k0s_config_status/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n -h, --help help for status\n -o, --output string Output format. Must be one of yaml|json\n
"},{"location":"cli/k0s_config_status/#see-also","title":"SEE ALSO","text":"Validate k0s configuration
"},{"location":"cli/k0s_config_validate/#synopsis","title":"Synopsis","text":"Example: k0s config validate --config path_to_config.yaml
k0s config validate [flags]\n
"},{"location":"cli/k0s_config_validate/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for validate\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_config_validate/#see-also","title":"SEE ALSO","text":"Run controller
k0s controller [join-token] [flags]\n
"},{"location":"cli/k0s_controller/#examples","title":"Examples","text":" Command to associate master nodes:\n CLI argument:\n $ k0s controller [join-token]\n\n or CLI flag:\n $ k0s controller --token-file [path_to_file]\n Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_controller/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --disable-components strings disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n --enable-dynamic-config enable cluster-wide dynamic config based on custom resource\n --enable-k0s-cloud-provider enables the k0s-cloud-provider (default false)\n --enable-metrics-scraper enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n --enable-worker enable worker (default false)\n -h, --help help for controller\n --ignore-pre-flight-checks continue even if pre-flight checks fail\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --k0s-cloud-provider-port int the port that k0s-cloud-provider binds on (default 10258)\n --k0s-cloud-provider-update-frequency duration the frequency of k0s-cloud-provider node updates (default 2m0s)\n --kube-controller-manager-extra-args string extra args for kube-controller-manager\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1])\n --no-taints disable default taints for controller node\n --profile string worker profile to use on the node (default \"default\")\n --single enable single node (implies --enable-worker, default false)\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing join-token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_controller/#see-also","title":"SEE ALSO","text":"containerd CLI
"},{"location":"cli/k0s_ctr/#synopsis","title":"Synopsis","text":"ctr is an unsupported debug and administrative client for interacting with the containerd daemon. Because it is unsupported, the commands, options, and operations are not guaranteed to be backward compatible or stable from release to release of the containerd project.
k0s ctr [flags]\n
"},{"location":"cli/k0s_ctr/#options","title":"Options","text":" -h, --help help for ctr\n
"},{"location":"cli/k0s_ctr/#see-also","title":"SEE ALSO","text":"Generate k0s command documentation
k0s docs <markdown|man> [flags]\n
"},{"location":"cli/k0s_docs/#options","title":"Options","text":" -h, --help help for docs\n
"},{"location":"cli/k0s_docs/#see-also","title":"SEE ALSO","text":"Manage etcd cluster
"},{"location":"cli/k0s_etcd/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for etcd\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd/#see-also","title":"SEE ALSO","text":"Sign off a given etc node from etcd cluster
k0s etcd leave [flags]\n
"},{"location":"cli/k0s_etcd_leave/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for leave\n --peer-address string etcd peer address\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_leave/#see-also","title":"SEE ALSO","text":"Returns etcd cluster members list
k0s etcd member-list [flags]\n
"},{"location":"cli/k0s_etcd_member-list/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for member-list\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_etcd_member-list/#see-also","title":"SEE ALSO","text":"Install k0s on a brand-new system. Must be run as root (or with sudo)
"},{"location":"cli/k0s_install/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -e, --env stringArray set environment variable\n --force force init script creation\n -h, --help help for install\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install/#see-also","title":"SEE ALSO","text":"Install k0s controller on a brand-new system. Must be run as root (or with sudo)
k0s install controller [flags]\n
"},{"location":"cli/k0s_install_controller/#examples","title":"Examples","text":"All default values of controller command will be passed to the service stub unless overridden.\n\nWith the controller subcommand you can setup a single node cluster by running:\n\n k0s install controller --single\n
"},{"location":"cli/k0s_install_controller/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --disable-components strings disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n --enable-dynamic-config enable cluster-wide dynamic config based on custom resource\n --enable-k0s-cloud-provider enables the k0s-cloud-provider (default false)\n --enable-metrics-scraper enable scraping metrics from the controller components (kube-scheduler, kube-controller-manager)\n --enable-worker enable worker (default false)\n -h, --help help for controller\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --k0s-cloud-provider-port int the port that k0s-cloud-provider binds on (default 10258)\n --k0s-cloud-provider-update-frequency duration the frequency of k0s-cloud-provider node updates (default 2m0s)\n --kube-controller-manager-extra-args string extra args for kube-controller-manager\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info])\n --no-taints disable default taints for controller node\n --profile string worker profile to use on the node (default \"default\")\n --single enable single node (implies --enable-worker, default false)\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing join-token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_controller/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --env stringArray set environment variable\n --force force init script creation\n
"},{"location":"cli/k0s_install_controller/#see-also","title":"SEE ALSO","text":"Install k0s worker on a brand-new system. Must be run as root (or with sudo)
k0s install worker [flags]\n
"},{"location":"cli/k0s_install_worker/#examples","title":"Examples","text":"Worker subcommand allows you to pass in all available worker parameters.\nAll default values of worker command will be passed to the service stub unless overridden.\n\nWindows flags like \"--api-server\", \"--cidr-range\" and \"--cluster-dns\" will be ignored since install command doesn't yet support Windows services\n
"},{"location":"cli/k0s_install_worker/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n -h, --help help for worker\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1,kube-scheduler=1])\n --profile string worker profile to use on the node (default \"default\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_install_worker/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --env stringArray set environment variable\n --force force init script creation\n
"},{"location":"cli/k0s_install_worker/#see-also","title":"SEE ALSO","text":"Create a kubeconfig file for a specified user
k0s kubeconfig [command] [flags]\n
"},{"location":"cli/k0s_kubeconfig/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for kubeconfig\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig/#see-also","title":"SEE ALSO","text":"Display Admin's Kubeconfig file
"},{"location":"cli/k0s_kubeconfig_admin/#synopsis","title":"Synopsis","text":"Print kubeconfig for the Admin user to stdout
k0s kubeconfig admin [flags]\n
"},{"location":"cli/k0s_kubeconfig_admin/#examples","title":"Examples","text":" $ k0s kubeconfig admin > ~/.kube/config\n $ export KUBECONFIG=~/.kube/config\n $ kubectl get nodes\n
"},{"location":"cli/k0s_kubeconfig_admin/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for admin\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_admin/#see-also","title":"SEE ALSO","text":"Create a kubeconfig for a user
"},{"location":"cli/k0s_kubeconfig_create/#synopsis","title":"Synopsis","text":"Create a kubeconfig with a signed certificate and public key for a given user (and optionally user groups) Note: A certificate once signed cannot be revoked for a particular user
k0s kubeconfig create username [flags]\n
"},{"location":"cli/k0s_kubeconfig_create/#examples","title":"Examples","text":" Command to create a kubeconfig for a user:\n CLI argument:\n $ k0s kubeconfig create username\n\n optionally add groups:\n $ k0s kubeconfig create username --groups [groups]\n
"},{"location":"cli/k0s_kubeconfig_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --groups string Specify groups\n -h, --help help for create\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_kubeconfig_create/#see-also","title":"SEE ALSO","text":"kubectl controls the Kubernetes cluster manager
"},{"location":"cli/k0s_kubectl/#synopsis","title":"Synopsis","text":"kubectl controls the Kubernetes cluster manager.
Find more information at: https://kubernetes.io/docs/reference/kubectl/
k0s kubectl [flags]\n
"},{"location":"cli/k0s_kubectl/#options","title":"Options","text":" --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace.\n --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.\n --as-uid string UID to impersonate for the operation.\n --cache-dir string Default cache directory (default \"/home/runner/.kube/cache\")\n --certificate-authority string Path to a cert file for the certificate authority\n --client-certificate string Path to a client certificate file for TLS\n --client-key string Path to a client key file for TLS\n --cluster string The name of the kubeconfig cluster to use\n --context string The name of the kubeconfig context to use\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n --debug Debug logging [$DEBUG]\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for kubectl\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Path to the kubeconfig file to use for CLI requests.\n --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)\n --match-server-version Require server version to match client version\n -n, --namespace string If present, the namespace scope for this CLI request\n --password string Password for basic authentication to the API server\n --profile string Name of profile to capture. One of (none|cpu|heap|goroutine|threadcreate|block|mutex) (default \"none\")\n --profile-output string Name of the file to write the profile to (default \"profile.pprof\")\n --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\")\n -s, --server string The address and port of the Kubernetes API server\n --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used\n --token string Bearer token for authentication to the API server\n --user string The name of the kubeconfig user to use\n --username string Username for basic authentication to the API server\n -v, --v Level number for the log level verbosity\n --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format)\n --warnings-as-errors Treat warnings received from the server as errors and exit with a non-zero exit code\n
"},{"location":"cli/k0s_kubectl/#see-also","title":"SEE ALSO","text":"Uninstall k0s. Must be run as root (or with sudo)
k0s reset [flags]\n
"},{"location":"cli/k0s_reset/#options","title":"Options","text":" -c, --config string config file, use '-' to read the config from stdin (default \"/etc/k0s/k0s.yaml\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for reset\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_reset/#see-also","title":"SEE ALSO","text":"restore k0s state from given backup archive. Use '-' as filename to read from stdin. Must be run as root (or with sudo)
k0s restore filename [flags]\n
"},{"location":"cli/k0s_restore/#options","title":"Options","text":" --config-out string Specify desired name and full path for the restored k0s.yaml file (default: /home/runner/work/k0s/k0s/k0s_<archive timestamp>.yaml\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for restore\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_restore/#see-also","title":"SEE ALSO","text":"Start the k0s service configured on this host. Must be run as root (or with sudo)
k0s start [flags]\n
"},{"location":"cli/k0s_start/#options","title":"Options","text":" -h, --help help for start\n
"},{"location":"cli/k0s_start/#see-also","title":"SEE ALSO","text":"Get k0s instance status information
k0s status [flags]\n
"},{"location":"cli/k0s_status/#examples","title":"Examples","text":"The command will return information about system init, PID, k0s role, kubeconfig and similar.\n
"},{"location":"cli/k0s_status/#options","title":"Options","text":" -h, --help help for status\n -o, --out string sets type of output to json or yaml\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status/#see-also","title":"SEE ALSO","text":"Get k0s instance component status information
k0s status components [flags]\n
"},{"location":"cli/k0s_status_components/#examples","title":"Examples","text":"The command will return information about k0s components.\n
"},{"location":"cli/k0s_status_components/#options","title":"Options","text":" -h, --help help for components\n --max-count int how many latest probes to show (default 1)\n
"},{"location":"cli/k0s_status_components/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -o, --out string sets type of output to json or yaml\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n
"},{"location":"cli/k0s_status_components/#see-also","title":"SEE ALSO","text":"Stop the k0s service configured on this host. Must be run as root (or with sudo)
k0s stop [flags]\n
"},{"location":"cli/k0s_stop/#options","title":"Options","text":" -h, --help help for stop\n
"},{"location":"cli/k0s_stop/#see-also","title":"SEE ALSO","text":"Display system information
"},{"location":"cli/k0s_sysinfo/#synopsis","title":"Synopsis","text":"Runs k0s's pre-flight checks and issues the results to stdout.
k0s sysinfo [flags]\n
"},{"location":"cli/k0s_sysinfo/#options","title":"Options","text":" --controller Include controller-specific sysinfo (default true)\n --data-dir string Data Directory for k0s (default \"/var/lib/k0s\")\n -h, --help help for sysinfo\n --worker Include worker-specific sysinfo (default true)\n
"},{"location":"cli/k0s_sysinfo/#see-also","title":"SEE ALSO","text":"Manage join tokens
"},{"location":"cli/k0s_token/#options","title":"Options","text":" -h, --help help for token\n
"},{"location":"cli/k0s_token/#see-also","title":"SEE ALSO","text":"Create join token
k0s token create [flags]\n
"},{"location":"cli/k0s_token_create/#examples","title":"Examples","text":"k0s token create --role worker --expiry 100h //sets expiration time to 100 hours\nk0s token create --role worker --expiry 10m //sets expiration time to 10 minutes\n
"},{"location":"cli/k0s_token_create/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --expiry string Expiration time of the token. Format 1.5h, 2h45m or 300ms. (default \"0s\")\n -h, --help help for create\n --role string Either worker or controller (default \"worker\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n --wait wait forever (default false)\n
"},{"location":"cli/k0s_token_create/#see-also","title":"SEE ALSO","text":"Invalidates existing join token
k0s token invalidate [flags]\n
"},{"location":"cli/k0s_token_invalidate/#examples","title":"Examples","text":"k0s token invalidate xyz123\n
"},{"location":"cli/k0s_token_invalidate/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for invalidate\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_invalidate/#see-also","title":"SEE ALSO","text":"List join tokens
k0s token list [flags]\n
"},{"location":"cli/k0s_token_list/#examples","title":"Examples","text":"k0s token list --role worker // list worker tokens\n
"},{"location":"cli/k0s_token_list/#options","title":"Options","text":" --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for list\n --role string Either worker, controller or empty for all roles\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_list/#see-also","title":"SEE ALSO","text":"Generates token and secret and stores them as a files
k0s token pre-shared [flags]\n
"},{"location":"cli/k0s_token_pre-shared/#examples","title":"Examples","text":"k0s token pre-shared --role worker --cert <path>/<to>/ca.crt --url https://<controller-ip>:<port>/\n
"},{"location":"cli/k0s_token_pre-shared/#options","title":"Options","text":" --cert string path to the CA certificate file\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n -h, --help help for pre-shared\n --out string path to the output directory. Default: current dir (default \".\")\n --role string token role. valid values: worker, controller. Default: worker (default \"worker\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --url string url of the api server to join\n --valid duration how long token is valid, in Go duration format\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_token_pre-shared/#see-also","title":"SEE ALSO","text":"Print the k0s version
k0s version [flags]\n
"},{"location":"cli/k0s_version/#options","title":"Options","text":" -a, --all use to print all k0s version info\n -h, --help help for version\n -j, --json use to print all k0s version info in json\n
"},{"location":"cli/k0s_version/#see-also","title":"SEE ALSO","text":"Run worker
k0s worker [join-token] [flags]\n
"},{"location":"cli/k0s_worker/#examples","title":"Examples","text":" Command to add worker node to the master node:\n CLI argument:\n $ k0s worker [token]\n\n or CLI flag:\n $ k0s worker --token-file [path_to_file]\n Note: Token can be passed either as a CLI argument or as a flag\n
"},{"location":"cli/k0s_worker/#options","title":"Options","text":" --api-server string HACK: api-server for the windows worker node\n --cidr-range string HACK: cidr range for the windows worker node (default \"10.96.0.0/12\")\n --cluster-dns string HACK: cluster dns for the windows worker node (default \"10.96.0.10\")\n --cri-socket string container runtime socket to use, default to internal containerd. Format: [remote|docker]:[path-to-socket]\n --data-dir string Data Directory for k0s (default: /var/lib/k0s). DO NOT CHANGE for an existing setup, things will break!\n -d, --debug Debug logging (default: false)\n --debugListenOn string Http listenOn for Debug pprof handler (default \":6060\")\n --enable-cloud-provider Whether or not to enable cloud provider support in kubelet\n -h, --help help for worker\n --ignore-pre-flight-checks continue even if pre-flight checks fail\n --iptables-mode string iptables mode (valid values: nft, legacy, auto). default: auto\n --kubelet-extra-args string extra args for kubelet\n --labels strings Node labels, list of key=value pairs\n -l, --logging stringToString Logging Levels for the different components (default [kube-scheduler=1,kubelet=1,kube-proxy=1,etcd=info,containerd=info,konnectivity-server=1,kube-apiserver=1,kube-controller-manager=1])\n --profile string worker profile to use on the node (default \"default\")\n --status-socket string Full file path to the socket file. (default \"/var/lib/k0s/run/status.sock\")\n --taints strings Node taints, list of key=value:effect strings\n --token-file string Path to the file containing token.\n -v, --verbose Verbose logging (default: false)\n
"},{"location":"cli/k0s_worker/#see-also","title":"SEE ALSO","text":"k0s follows the CNCF Code of Conduct.
"},{"location":"contributors/github_workflow/","title":"GitHub Workflow","text":"This guide assumes you have already cloned the upstream repo to your system via git clone
, or via go get github.com/k0sproject/k0s
.
export GITHUB_USER={ your github username }\n
cd $WORKDIR/k0s\ngit remote add $GITHUB_USER git@github.com:${GITHUB_USER}/k0s.git\n\n# Prevent push to Upstream\ngit remote set-url --push origin no_push\n\n# Set your fork remote as a default push target\ngit push --set-upstream $GITHUB_USER main\n
Your remotes should look something like this:
git remote -v\n
origin https://github.com/k0sproject/k0s (fetch)\norigin no_push (push)\nmy_fork git@github.com:{ github_username }/k0s.git (fetch)\nmy_fork git@github.com:{ github_username }/k0s.git (push)\n
"},{"location":"contributors/github_workflow/#create-rebase-your-feature-branch","title":"Create & Rebase Your Feature Branch","text":"Create a feature branch and switch to it:
git checkout -b my_feature_branch\n
Rebase your branch:
git fetch origin && \\\ngit rebase origin/main\n
Current branch my_feature_branch is up to date.\n
Please don't use git pull
instead of the above fetch
/ rebase
. git pull
does a merge, which leaves merge commits. These make the commit history messy and violate the principle that commits ought to be individually understandable and useful.
Commit and sign your changes:
git commit --signoff\n
The commit message should have a short, capitalized title without trailing period as first line. After the title a blank line and then a longer description that explains why the change was made, unless it is obvious.
Use imperative mood in the commit message.
For example:
Summarize changes in around 50 characters or less\n\nMore detailed explanatory text, if necessary. Wrap it to about 72\ncharacters or so. In some contexts, the first line is treated as the\nsubject of the commit and the rest of the text as the body. The\nblank line separating the summary from the body is critical (unless\nyou omit the body entirely); various tools like `log`, `shortlog`\nand `rebase` can get confused if you run the two together.\n\nExplain the problem that this commit is solving. Focus on why you\nare making this change as opposed to how (the code explains that).\nAre there side effects or other unintuitive consequences of this\nchange? Here's the place to explain them.\n\nFurther paragraphs come after blank lines.\n\n - Bullet points are okay, too\n\n - Typically a hyphen or asterisk is used for the bullet, preceded\n by a single space, with blank lines in between.\n\nIf you use an issue tracker, put references to them at the bottom,\nlike this:\n\nFixes: https://github.com/k0sproject/k0s/issues/373\nSee also: #456, #789\n\nSigned-off-by: Name Lastname <user@example.com>\n
You can go back and edit/build/test some more, then commit --amend
in a few cycles.
When ready, push your changes to your fork's repository:
git push --set-upstream my_fork my_feature_branch\n
"},{"location":"contributors/github_workflow/#open-a-pull-request","title":"Open a Pull Request","text":"See GitHub's docs on how to create a pull request from a fork.
"},{"location":"contributors/github_workflow/#get-a-code-review","title":"Get a code review","text":"Once your pull request has been opened it will be assigned to one or more reviewers, and will go through a series of smoke tests.
Commit changes made in response to review comments should be added to the same branch on your fork.
Very small PRs are easy to review. Very large PRs are very difficult to review.
"},{"location":"contributors/github_workflow/#squashing-commits","title":"Squashing Commits","text":"Commits on your branch should represent meaningful milestones or units of work. Small commits that contain typo fixes, rebases, review feedbacks, etc should be squashed.
To do that, it's best to perform an interactive rebase:
"},{"location":"contributors/github_workflow/#example","title":"Example","text":"Rebase your feature branch against upstream main branch:
git rebase -i origin/main\n
If your PR has 3 commits, output would be similar to this:
pick f7f3f6d Changed some code\npick 310154e fixed some typos\npick a5f4a0d made some review changes\n\n# Rebase 710f0f8..a5f4a0d onto 710f0f8\n#\n# Commands:\n# p, pick <commit> = use commit\n# r, reword <commit> = use commit, but edit the commit message\n# e, edit <commit> = use commit, but stop for amending\n# s, squash <commit> = use commit, but meld into previous commit\n# f, fixup <commit> = like \"squash\", but discard this commit's log message\n# x, exec <command> = run command (the rest of the line) using shell\n# b, break = stop here (continue rebase later with 'git rebase --continue')\n# d, drop <commit> = remove commit\n# l, label <label> = label current HEAD with a name\n# t, reset <label> = reset HEAD to a label\n# m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n# . create a merge commit using the original merge commit's\n# . message (or the oneline, if no original merge commit was\n# . specified). Use -c <commit> to reword the commit message.\n#\n# These lines can be re-ordered; they are executed from top to bottom.\n#\n# However, if you remove everything, the rebase will be aborted.\n#\n# Note that empty commits are commented out\n
Use a command line text editor to change the word pick
to f
of fixup
for the commits you want to squash, then save your changes and continue the rebase:
Per the output above, you can see that:
fixup <commit> = like \"squash\", but discard this commit's log message\n
Which means that when rebased, the commit message \"fixed some typos\" will be removed, and squashed with the parent commit.
"},{"location":"contributors/github_workflow/#push-your-final-changes","title":"Push Your Final Changes","text":"Once done, you can push the final commits to your branch:
git push --force\n
You can run multiple iteration of rebase
/push -f
, if needed.
Thank you for taking the time to make a contribution to k0s. The following document is a set of guidelines and instructions for contributing to k0s.
When contributing to this repository, please consider first discussing the change you wish to make by opening an issue.
"},{"location":"contributors/overview/#code-of-conduct","title":"Code of Conduct","text":"Our code of conduct can be found in the link below. Please follow it in all your interactions with the project.
We use GitHub flow, so all code changes are tracked via Pull Requests. A detailed guide on the recommended workflow can be found below:
All submitted PRs go through a set of tests and reviews. You can run most of these tests before a PR is submitted. In fact, we recommend it, because it will save on many possible review iterations and automated tests. The testing guidelines can be found here:
By contributing, you agree that your contributions will be licensed as followed:
Some of you might have noticed we have official community blog hosted on Medium. If you are not yet following us, we'd like to invite you to do so now! Make sure to follow us on Twitter as well \ud83d\ude0a
We have also decided to participate in the Lens Forums. As part of our ongoing collaboration with the Lens IDE team, who are not only close friends of the k0s crew but also widely embraced by the Kubernetes user community, it was only natural for us to join forces on their platform. By becoming a part of the Lens Forums, you can easily connect with us through the dedicated k0s categories. Stay in the loop with the latest news, engage in technical discussions, and contribute your expertise and feedback!
"},{"location":"contributors/testing/","title":"Testing Your Code","text":"k0s uses github actions to run automated tests on any PR, before merging. However, a PR will not be reviewed before all tests are green, so to save time and prevent your PR from going stale, it is best to test it before submitting the PR.
"},{"location":"contributors/testing/#run-local-verifications","title":"Run Local Verifications","text":"Please run the following style and formatting commands and fix/check-in any changes:
Linting
We use golangci-lint
for style verification. In the repository's root directory, simply run:
make lint\n
There's no need to install golangci-lint
manually. The build system will take care of that.
Go fmt
go fmt ./...\n
Checking the documentation
Verify any changes to the documentation by following the instructions here.
Pre-submit Flight Checks
In the repository root directory, make sure that:
make build && git diff --exit-code
runs successfully. Verifies that the build is working and that the generated source code matches the one that's checked into source control.make check-unit
runs successfully. Verifies that all the unit tests pass.make check-basic
runs successfully. Verifies basic cluster functionality using one controller and two workers.make check-hacontrolplane
runs successfully. Verifies that joining of controllers works.Please note that this last test is prone to \"flakiness\", so it might fail on occasion. If it fails constantly, take a deeper look at your code to find the source of the problem.
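For convenience, the checks listed above can be chained into a single command line (a sketch; it simply runs the same make targets in order and stops at the first failure):
make lint && make build && git diff --exit-code && make check-unit && make check-basic && make check-hacontrolplane\n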
If you find that all tests passed, you may open a pull request upstream.
You may open a pull request in draft mode. All automated tests will still run against the PR, but the PR will not be assigned for review. Once a PR is ready for review, transition it from Draft mode, and code owners will be notified.
"},{"location":"contributors/testing/#conformance-testing","title":"Conformance Testing","text":"Once a PR has been reviewed and all other tests have passed, a code owner will run a full end-to-end conformance test against the PR. This is usually the last step before merging.
"},{"location":"contributors/testing/#pre-requisites-for-pr-merge","title":"Pre-Requisites for PR Merge","text":"In order for a PR to be merged, the following conditions should exist:
--signoff
option. In order to clean up the local workspace, run make clean
. It will clean up all of the intermediate files and directories created during the k0s build. Note that you can't just use git clean -X
or even rm -rf
, since the Go modules cache sets all of its subdirectories to read-only. If you get in trouble while trying to delete your local workspace, try chmod -R u+w /path/to/workspace && rm -rf /path/to/workspace
.
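A minimal sketch of that recovery sequence (the workspace path is a placeholder for your own checkout):
make clean\n# if some files still cannot be removed, make the Go module cache writable first\nchmod -R u+w /path/to/workspace && rm -rf /path/to/workspace\n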
You can configure k0s with the Ambassador API Gateway and a MetalLB service load balancer. To do this, you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml
file during cluster configuration.
Note: Currently Ambassador API Gateway does not support Kubernetes v1.22 or above. See here for details.
"},{"location":"examples/ambassador-ingress/#use-docker-for-non-native-k0s-platforms","title":"Use Docker for non-native k0s platforms","text":"With Docker you can run k0s on platforms that the distribution does not natively support (refer to Run k0s in Docker). Skip this section if you are on a platform that k0s natively supports.
As you need to create a custom configuration file to install Ambassador Gateway, you will first need to map that file into the k0s container and expose the ports Ambassador needs for outside access.
Run k0s under Docker:
docker run -d --name k0s --hostname k0s --privileged -v /var/lib/k0s -p 6443:6443 docker.io/k0sproject/k0s:latest\n
Export the default k0s configuration file:
docker exec k0s k0s config create > k0s.yaml\n
Export the cluster config, so you can access it using kubectl:
docker exec k0s cat /var/lib/k0s/pki/admin.conf > k0s-cluster.conf\nexport KUBECONFIG=\"$KUBECONFIG:$PWD/k0s-cluster.conf\"\n
k0s.yaml
for Ambassador Gateway","text":"Open the k0s.yml
file and append the following extensions at the end:
extensions:\nhelm:\nrepositories:\n- name: datawire\nurl: https://www.getambassador.io\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: ambassador\nchartname: datawire/ambassador\nversion: \"6.5.13\"\nnamespace: ambassador\nvalues: |2\nservice:\nexternalIPs:\n- 172.17.0.2\n- name: metallb\nchartname: bitnami/metallb\nversion: \"1.0.1\"\nnamespace: default\nvalues: |2\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 172.17.0.2\n
Note: It may be necessary to replace the 172.17.0.2 IP with your local IP address.
This action adds both Ambassador and MetalLB (required for LoadBalancers) with the corresponding repositories and (minimal) configurations. Be aware that the provided example illustrates the use of your local network and that you will want to provide a range of IPs for MetalLB that are addressable on your LAN to access these services from anywhere on your network.
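If you are unsure which address to use in the ranges above, you can check the IP that Docker assigned to the k0s container (a sketch; assumes the container from the run command above is still running):
docker inspect -f '{{ .NetworkSettings.IPAddress }}' k0s\n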
Stop/remove your k0s container:
docker stop k0s\ndocker rm k0s\n
Restart your k0s container, this time with additional ports and the above config file mapped into it:
docker run --name k0s --hostname k0s --privileged -v /var/lib/k0s -v \"$PWD\"/k0s.yaml:/k0s.yaml -p 6443:6443 -p 80:80 -p 443:443 -p 8080:8080 docker.io/k0sproject/k0s:latest\n
After some time, you will be able to list the Ambassador Services:
kubectl get services -n ambassador\n
Output:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nambassador-1611224811 LoadBalancer 10.99.84.151 172.17.0.2 80:30327/TCP,443:30355/TCP 2m11s\nambassador-1611224811-admin ClusterIP 10.96.79.130 <none> 8877/TCP 2m11s\nambassador-1611224811-redis ClusterIP 10.110.33.229 <none> 6379/TCP 2m11s\n
Install the Ambassador edgectl tool and run the login command:
edgectl login --namespace=ambassador localhost\n
Your browser will open and deliver you to the Ambassador Console.
Create a YAML file for the service (for example purposes, create a Swagger Petstore service using a petstore.yaml file):
---\napiVersion: v1\nkind: Service\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nports:\n- name: http\nport: 80\ntargetPort: 8080\nselector:\napp: petstore\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: petstore\nstrategy:\ntype: RollingUpdate\ntemplate:\nmetadata:\nlabels:\napp: petstore\nspec:\ncontainers:\n- name: petstore-backend\nimage: docker.io/swaggerapi/petstore3:unstable\nports:\n- name: http\ncontainerPort: 8080\n---\napiVersion: getambassador.io/v2\nkind: Mapping\nmetadata:\nname: petstore\nnamespace: ambassador\nspec:\nprefix: /petstore/\nservice: petstore\n
Apply the YAML file:
kubectl apply -f petstore.yaml\n
Output:
service/petstore created\ndeployment.apps/petstore created\nmapping.getambassador.io/petstore created\n
Validate that the service is running.
In the terminal using curl:
curl -k 'https://localhost/petstore/api/v3/pet/findByStatus?status=available'\n
Output:
[{\"id\":1,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":2,\"category\":{\"id\":2,\"name\":\"Cats\"},\"name\":\"Cat 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":4,\"category\":{\"id\":1,\"name\":\"Dogs\"},\"name\":\"Dog 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":7,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag1\"},{\"id\":2,\"name\":\"tag2\"}],\"status\":\"available\"},{\"id\":8,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 2\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag2\"},{\"id\":2,\"name\":\"tag3\"}],\"status\":\"available\"},{\"id\":9,\"category\":{\"id\":4,\"name\":\"Lions\"},\"name\":\"Lion 3\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"},{\"id\":10,\"category\":{\"id\":3,\"name\":\"Rabbits\"},\"name\":\"Rabbit 1\",\"photoUrls\":[\"url1\",\"url2\"],\"tags\":[{\"id\":1,\"name\":\"tag3\"},{\"id\":2,\"name\":\"tag4\"}],\"status\":\"available\"}]\n
Or by way of your browser:
Open https://localhost/petstore/ in your browser and change the URL in the field at the top of the page to https://localhost/petstore/api/v3/openapi.json (as it is mapped to the /petstore prefix) and click Explore.
Navigate to the Mappings area in the Ambassador Console to view the corresponding PetStore mapping as configured.
Ansible is a popular infrastructure-as-code tool that can be used to automate tasks for the purpose of achieving the desired state in a system. With Ansible (and the k0s-Ansible playbook) you can quickly install a multi-node Kubernetes cluster.
Note: Before using Ansible to create a cluster, you should have a general understanding of Ansible (refer to the official Ansible User Guide).
"},{"location":"examples/ansible-playbook/#prerequisites","title":"Prerequisites","text":"You will require the following tools to install k0s on local virtual machines:
Tool Detailmultipass
A lightweight VM manager that uses KVM on Linux, Hyper-V on Windows, and hypervisor.framework on macOS. Installation information ansible
An infrastructure as code tool. Installation Guide kubectl
Command line tool for running commands against Kubernetes clusters. Kubernetes Install Tools"},{"location":"examples/ansible-playbook/#create-the-cluster","title":"Create the cluster","text":"Download k0s-ansible
Clone the k0s-ansible repository on your local machine:
git clone https://github.com/movd/k0s-ansible.git\ncd k0s-ansible\n
Create virtual machines
Note: Though multipass is the VM manager in use here, the playbook does not depend on it.
Create a number of virtual machines. For the automation to work, each instance must have passwordless SSH access. To achieve this, provision each instance with a cloud-init manifest that imports your current user's public SSH key into a user k0s
(refer to the bash script below).
This creates 7 virtual machines:
./tools/multipass_create_instances.sh 7\n
Create cloud-init to import ssh key...\n[1/7] Creating instance k0s-1 with multipass...\nLaunched: k0s-1\n[2/7] Creating instance k0s-2 with multipass...\nLaunched: k0s-2\n[3/7] Creating instance k0s-3 with multipass...\nLaunched: k0s-3\n[4/7] Creating instance k0s-4 with multipass...\nLaunched: k0s-4\n[5/7] Creating instance k0s-5 with multipass...\nLaunched: k0s-5\n[6/7] Creating instance k0s-6 with multipass...\nLaunched: k0s-6\n[7/7] Creating instance k0s-7 with multipass...\nLaunched: k0s-7\nName State IPv4 Image\nk0s-1 Running 192.168.64.32 Ubuntu 20.04 LTS\nk0s-2 Running 192.168.64.33 Ubuntu 20.04 LTS\nk0s-3 Running 192.168.64.56 Ubuntu 20.04 LTS\nk0s-4 Running 192.168.64.57 Ubuntu 20.04 LTS\nk0s-5 Running 192.168.64.58 Ubuntu 20.04 LTS\nk0s-6 Running 192.168.64.60 Ubuntu 20.04 LTS\nk0s-7 Running 192.168.64.61 Ubuntu 20.04 LTS\n
Create Ansible inventory
1. Copy the sample to create the inventory directory:
```shell\n cp -rfp inventory/sample inventory/multipass\n ```\n
2. Create the inventory.
Assign the virtual machines to the different host groups, as required by the playbook logic.\n\n | Host group | Detail |\n |:----------------------|:------------------------------------------|\n | `initial_controller` | Must contain a single node that creates the worker and controller tokens needed by the other nodes|\n | `controller` | Can contain nodes that, together with the host from `initial_controller`, form a highly available isolated control plane |\n | `worker` | Must contain at least one node, to allow for the deployment of Kubernetes objects |\n
3. Fill in inventory/multipass/inventory.yml
. This can be done by direct entry using the metadata provided by multipass list
, or you can use the following Python script multipass_generate_inventory.py
:
```shell\n ./tools/multipass_generate_inventory.py\n ```\n\n ```shell\n Designate first three instances as control plane\n Created Ansible Inventory at: /Users/dev/k0s-ansible/tools/inventory.yml\n $ cp tools/inventory.yml inventory/multipass/inventory.yml\n ```\n\n Your `inventory/multipass/inventory.yml` should resemble the example below:\n\n ```yaml\n ---\n all:\n children:\n initial_controller:\n hosts:\n k0s-1:\n controller:\n hosts:\n k0s-2:\n k0s-3:\n worker:\n hosts:\n k0s-4:\n k0s-5:\n k0s-6:\n k0s-7:\n hosts:\n k0s-1:\n ansible_host: 192.168.64.32\n k0s-2:\n ansible_host: 192.168.64.33\n k0s-3:\n ansible_host: 192.168.64.56\n k0s-4:\n ansible_host: 192.168.64.57\n k0s-5:\n ansible_host: 192.168.64.58\n k0s-6:\n ansible_host: 192.168.64.60\n k0s-7:\n ansible_host: 192.168.64.61\n vars:\n ansible_user: k0s\n ```\n
Test the virtual machine connections
Run the following command to test the connection to your hosts:
ansible all -i inventory/multipass/inventory.yml -m ping\n
k0s-4 | SUCCESS => {\n\"ansible_facts\": {\n\"discovered_interpreter_python\": \"/usr/bin/python3\"\n},\n \"changed\": false,\n \"ping\": \"pong\"\n}\n...\n
If the test result indicates success, you can proceed.
Provision the cluster with Ansible
When you apply the playbook, k0s will be downloaded and set up on all nodes, tokens will be exchanged, and a kubeconfig will be dumped to your local deployment environment.
ansible-playbook site.yml -i inventory/multipass/inventory.yml\n
TASK [k0s/initial_controller : print kubeconfig command] *******************************************************\nTuesday 22 December 2020 17:43:20 +0100 (0:00:00.257) 0:00:41.287 ******\nok: [k0s-1] => {\n\"msg\": \"To use Cluster: export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\"\n}\n...\nPLAY RECAP *****************************************************************************************************\nk0s-1 : ok=21 changed=11 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-2 : ok=10 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-3 : ok=10 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-4 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-5 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-6 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\nk0s-7 : ok=9 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0\n\nTuesday 22 December 2020 17:43:36 +0100 (0:00:01.204) 0:00:57.478 ******\n===============================================================================\nprereq : Install apt packages -------------------------------------------------------------------------- 22.70s\nk0s/controller : Wait for k8s apiserver ----------------------------------------------------------------- 4.30s\nk0s/initial_controller : Create worker join token ------------------------------------------------------- 3.38s\nk0s/initial_controller : Wait for k8s apiserver --------------------------------------------------------- 3.36s\ndownload : Download k0s binary k0s-v0.9.0-rc1-amd64 ----------------------------------------------------- 3.11s\nGathering Facts ----------------------------------------------------------------------------------------- 2.85s\nGathering Facts ----------------------------------------------------------------------------------------- 1.95s\nprereq : Create k0s Directories ------------------------------------------------------------------------- 1.53s\nk0s/worker : Enable and check k0s service --------------------------------------------------------------- 1.20s\nprereq : Write the k0s config file ---------------------------------------------------------------------- 1.09s\nk0s/initial_controller : Enable and check k0s service --------------------------------------------------- 0.94s\nk0s/controller : Enable and check k0s service ----------------------------------------------------------- 0.73s\nGathering Facts ----------------------------------------------------------------------------------------- 0.71s\nGathering Facts ----------------------------------------------------------------------------------------- 0.66s\nGathering Facts ----------------------------------------------------------------------------------------- 0.64s\nk0s/worker : Write the k0s token file on worker --------------------------------------------------------- 0.64s\nk0s/worker : Copy k0s service file ---------------------------------------------------------------------- 0.53s\nk0s/controller : Write the k0s token file on controller ------------------------------------------------- 0.41s\nk0s/controller : Copy k0s service file ------------------------------------------------------------------ 0.40s\nk0s/initial_controller : Copy k0s service file ---------------------------------------------------------- 0.36s\n
A kubeconfig was copied to your local machine while the playbook was running, which you can use to gain access to your new Kubernetes cluster:
export KUBECONFIG=/Users/dev/k0s-ansible/inventory/multipass/artifacts/k0s-kubeconfig.yml\nkubectl cluster-info\n
Kubernetes control plane is running at https://192.168.64.32:6443\nCoreDNS is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://192.168.64.32:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\n\n$ kubectl get nodes -o wide\nNAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\nk0s-4 Ready <none> 21s v1.20.1-k0s1 192.168.64.57 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-5 Ready <none> 21s v1.20.1-k0s1 192.168.64.58 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-6 NotReady <none> 21s v1.20.1-k0s1 192.168.64.60 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\nk0s-7 NotReady <none> 21s v1.20.1-k0s1 192.168.64.61 <none> Ubuntu 20.04.1 LTS 5.4.0-54-generic containerd://1.4.3\n
Note: The first three control plane nodes will not display, as the control plane is fully isolated. To check on the distributed etcd cluster, you can use ssh to securely log in to a controller node, or you can run the following ad-hoc command:
ansible k0s-1 -a \"k0s etcd member-list -c /etc/k0s/k0s.yaml\" -i inventory/multipass/inventory.yml | tail -1 | jq\n
{\n\"level\": \"info\",\n\"members\": {\n\"k0s-1\": \"https://192.168.64.32:2380\",\n\"k0s-2\": \"https://192.168.64.33:2380\",\n\"k0s-3\": \"https://192.168.64.56:2380\"\n},\n\"msg\": \"done\",\n\"time\": \"2020-12-23T00:21:22+01:00\"\n}\n
Once all worker nodes are in the Ready
state, you can use the cluster. You can test the cluster state by creating a simple nginx deployment.
kubectl create deployment nginx --image=gcr.io/google-containers/nginx --replicas=5\n
deployment.apps/nginx created\n
kubectl expose deployment nginx --target-port=80 --port=8100\n
service/nginx exposed\n
kubectl run hello-k0s --image=quay.io/prometheus/busybox --rm -it --restart=Never --command -- wget -qO- nginx:8100\n
<!DOCTYPE html>\n<html>\n<head>\n<title>Welcome to nginx on Debian!</title>\n...\npod \"hello-k0s\" deleted\n
Note: k0s-ansible is developed by k0s users. Please send your feedback, bug reports, and pull requests to github.com/movd/k0s-ansible.
"},{"location":"examples/gitops-flux/","title":"Using GitOps with Flux","text":"This tutorial describes the benefits of using GitOps with k0s and provides an example of deploying an application with Flux v2.
GitOps is a practice where you leverage Git as the single source of truth. It offers a declarative way to do Kubernetes cluster management and application delivery. The desired states, using Kubernetes manifests and helm packages, are pulled from a git repository and automatically deployed to the cluster. This also makes it quick to re-deploy and recover applications whenever needed.
"},{"location":"examples/gitops-flux/#why-gitops-with-k0s","title":"Why GitOps with k0s","text":"k0s doesn't come with a lot of different extensions and add-ons that some users might find useful (and some not). Instead, k0s comes with 100% upstream Kubernetes and is compatible with all Kubernetes extensions. This makes it easy for k0s users to freely select the needed extensions that their applications and infrastructure need, without conflicting to any predefined options. Now, GitOps is a perfect practice to deploy these extensions automatically with applications by defining and configuring them directly in Git. This will also help with cluster security as the cluster doesn't need to be accessed directly when application changes are needed. However, this puts more stress on the Git access control, because changes in Git are propagated automatically to the cluster.
"},{"location":"examples/gitops-flux/#install-k0s","title":"Install k0s","text":"Let's start by installing k0s. Any k0s deployment option will do, but to keep things simple, this Quick Start Guide gets you started with a single node k0s cluster.
Run these three commands to download k0s, install and start it:
curl -sSLf https://get.k0s.sh | sudo sh\nsudo k0s install controller --single\nsudo k0s start\n
"},{"location":"examples/gitops-flux/#set-kubeconfig","title":"Set kubeconfig","text":"Next, you need to set the KUBECONFIG variable, which is needed by Flux CLI later on.
sudo k0s kubeconfig admin > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\n
"},{"location":"examples/gitops-flux/#install-flux","title":"Install Flux","text":"To proceed with Flux, install the Flux CLI, which is used for configuring Flux to your Kubernetes cluster. For macOS and Linux, this can be done either with brew or bash script. Use one of them:
brew install fluxcd/tap/flux\n
or
curl -s https://fluxcd.io/install.sh | sudo bash\n
For more details of the Flux installation, check the Flux documentation.
"},{"location":"examples/gitops-flux/#configure-flux-for-a-github-repository","title":"Configure Flux for a GitHub repository","text":"Export your GitHub personal access token (instructions how to get it) and username:
export GITHUB_TOKEN=<your-token>\nexport GITHUB_USER=<your-username>\n
Come up with a GitHub repo name (e.g. flux-demo), which will be used by Flux to store (and sync) the config files.
export GITHUB_REPO_NAME=<select-repo-name-to-be-created>\n
Bootstrap flux to your cluster. The GitHub repo will be created automatically by Flux:
flux bootstrap github \\\n--owner=$GITHUB_USER \\\n--repository=$GITHUB_REPO_NAME \\\n--branch=main \\\n--path=./clusters/my-cluster \\\n--personal\n
Now you are all set with Flux and can proceed to deploy your first application.
"},{"location":"examples/gitops-flux/#deploy-example-application","title":"Deploy example application","text":"Next, we'll deploy a simple web application and expose it using a NodePort service. In the previous step, we configured Flux to track the path /clusters/my-cluster/ in your repository. Now clone the repo to your local machine:
git clone git@github.com:$GITHUB_USER/$GITHUB_REPO_NAME.git\ncd $GITHUB_REPO_NAME/clusters/my-cluster/\n
Create the following YAML file (simple-web-server-with-nodeport.yaml) into the same directory:
apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\ntype: NodePort\nselector:\napp: web\nports:\n- port: 80\ntargetPort: 80\nnodePort: 30003\n
Then push the new file to the repository:
git add .\ngit commit -m \"Add web server manifest\"\ngit push\n
Check that Flux detects your changes and the web server gets applied (by default this should happen within 1 min):
flux get kustomizations\n
If the deployment went successfully, you should see the newly added objects:
sudo k0s kubectl get all -n web\n
You can try to access the web application using
curl localhost:30003\n
or by using a web browser http://localhost:30003.
Voil\u00e0! You have now installed the example application using the GitOps method with Flux. As a next step, you can try to modify the web app YAML file or add another application directly into the Git repo and see how Flux automatically picks up the changes without you accessing the cluster with kubectl.
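A rough sketch of that workflow, using an image tag bump as the illustrative change (file, namespace and resource names are the ones used above; the exact tag is only an example):
sed -i 's/httpd:2.4.53-alpine/httpd:2.4.54-alpine/' simple-web-server-with-nodeport.yaml\ngit commit -am \"Bump httpd image\"\ngit push\nflux get kustomizations --watch\nkubectl -n web get deployment web-server -o jsonpath='{.spec.template.spec.containers[0].image}'\n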
"},{"location":"examples/gitops-flux/#uninstall-flux","title":"Uninstall Flux","text":"If you want to uninstall Flux from the cluster, run:
flux uninstall --namespace=flux-system\n
Your applications, which were installed by Flux, will remain in the cluster, but you don't have the Flux processes anymore to sync up the desired state from Git.
"},{"location":"examples/metallb-loadbalancer/","title":"Installing MetalLB Load Balancer","text":"This tutorial covers the installation of MetalLB load balancer on k0s. k0s doesn't come with an in-built load balancer, but it's easy to deploy MetalLB as shown in this document.
"},{"location":"examples/metallb-loadbalancer/#about-load-balancers","title":"About Load Balancers","text":"Load balancers can be used for exposing applications to the external network. Load balancer provides a single IP address to route incoming requests to your app. In order to successfully create Kubernetes services of type LoadBalancer, you need to have the load balancer (implementation) available for Kubernetes.
Load balancer can be implemented by a cloud provider as an external service (with additional cost). This can also be implemented internally in the Kubernetes cluster (pure SW solution) with MetalLB.
"},{"location":"examples/metallb-loadbalancer/#metallb","title":"MetalLB","text":"MetalLB implements the Kubernetes service of type LoadBalancer. When a LoadBalancer service is requested, MetalLB allocates an IP address from the configured range and makes the network aware that the IP \u201clives\u201d in the cluster.
One of the benefits of MetalLB is that you avoid all cloud provider dependencies. That's why MetalLB is typically used for bare-metal deployments.
See the MetalLB requirements in the MetalLB's official documentation. By default, k0s runs with Kube-Router CNI, which is compatible with MetalLB as long as you don't use MetalLB\u2019s BGP mode. If you are not using Kube-Router and you are using kube-proxy in IPVS mode, you need to enable strict ARP mode in kube-proxy (see MetalLB preparations):
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\nname: k0s\nspec:\nnetwork:\nkubeProxy:\nmode: ipvs\nipvs:\nstrictARP: true\n
Port 7946 (TCP & UDP) must be allowed between the nodes. In addition, before installing MetalLB, make sure there is no other software running on port 7946 on the nodes, such as docker daemon.
"},{"location":"examples/metallb-loadbalancer/#install-metallb","title":"Install MetalLB","text":"Install MetalLB using the official Helm chart and k0s Helm extension manager:
apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nmetadata:\n  name: k0s\nspec:\n  extensions:\n    helm:\n      repositories:\n      - name: metallb\n        url: https://metallb.github.io/metallb\n      charts:\n      - name: metallb\n        chartname: metallb/metallb\n        namespace: metallb\n
Other installation methods are available in the MetalLB's official documentation.
Create ConfigMap for MetalLB
Next, you need to create a ConfigMap, which includes an IP address range for the load balancer. The pool of IPs must be dedicated to MetalLB's use. You can't reuse, for example, the Kubernetes node IPs or IPs controlled by other services. You can, however, use private IP addresses, for example 192.168.1.180-192.168.1.199, but then you need to take care of the routing from the external network if you need external access. In this example, we don't need it.
Create a YAML file accordingly, and deploy it: kubectl apply -f metallb-l2-pool.yaml
---\napiVersion: metallb.io/v1beta1\nkind: IPAddressPool\nmetadata:\nname: first-pool\nnamespace: metallb-system\nspec:\naddresses:\n- <ip-address-range-start>-<ip-address-range-stop>\n---\napiVersion: metallb.io/v1beta1\nkind: L2Advertisement\nmetadata:\nname: example\nnamespace: metallb-system\n
Deploy an example application (web server) with a load balancer
apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
Check your LoadBalancer
Run the following command to see your LoadBalancer with the external-ip and port.
kubectl get service -n web\n
Access your example application
If you used private IP addresses for MetalLB in the ConfigMap (in step 2), you should run the following command from the local network. Use the IP address from the previous step.
curl <EXTERNAL-IP>\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
For more information about MetalLB installation, take a look at the official MetalLB documentation.
"},{"location":"examples/metallb-loadbalancer/#alternative-examples","title":"Alternative examples","text":"Get load balancer using cloud provider.
"},{"location":"examples/nginx-ingress/","title":"Installing NGINX Ingress Controller","text":"This tutorial covers the installation of NGINX Ingress controller, which is an open source project made by the Kubernetes community. k0s doesn't come with an in-built Ingress controller, but it's easy to deploy NGINX Ingress as shown in this document. Other Ingress solutions can be used as well (see the links at the end of the page).
"},{"location":"examples/nginx-ingress/#nodeport-vs-loadbalancer-vs-ingress-controller","title":"NodePort vs LoadBalancer vs Ingress controller","text":"Kubernetes offers multiple options for exposing services to external networks. The main options are NodePort, LoadBalancer and Ingress controller.
NodePort, as the name says, means that a port on a node is configured to route incoming requests to a certain service. The port range is limited to 30000-32767, so you cannot expose commonly used ports like 80 or 443 with NodePort.
LoadBalancer is a service, which is typically implemented by the cloud provider as an external service (with additional cost). Load balancers can also be installed internally in the Kubernetes cluster with MetalLB, which is typically used for bare-metal deployments. Load balancer provides a single IP address to access your services, which can run on multiple nodes.
Ingress controller helps to consolidate routing rules of multiple applications into one entity. Ingress controller is exposed to an external network with the help of NodePort, LoadBalancer or host network. You can also use Ingress controller to terminate TLS for your domain in one place, instead of terminating TLS for each application separately.
"},{"location":"examples/nginx-ingress/#nginx-ingress-controller","title":"NGINX Ingress Controller","text":"NGINX Ingress Controller is a very popular Ingress for Kubernetes. In many cloud environments, it can be exposed to an external network by using the load balancer offered by the cloud provider. However, cloud load balancers are not necessary. Load balancer can also be implemented with MetalLB, which can be deployed in the same Kubernetes cluster. Another option to expose the Ingress controller to an external network is to use NodePort. The third option is to use host network. All of these alternatives are described in more detail on below, with separate examples.
"},{"location":"examples/nginx-ingress/#install-nginx-using-nodeport","title":"Install NGINX using NodePort","text":"Installing NGINX using NodePort is the most simple example for Ingress Controller as we can avoid the load balancer dependency. NodePort is used for exposing the NGINX Ingress to the external network.
Install NGINX Ingress Controller (using the official manifests by the ingress-nginx project)
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
Check that the Ingress controller pods have started
kubectl get pods -n ingress-nginx\n
Check that you can see the NodePort service
kubectl get services -n ingress-nginx\n
From version v1.0.0
of the Ingress-NGINX Controller, an ingressclass object is required.
In the default installation, an ingressclass object named nginx
has already been created.
$ kubectl -n ingress-nginx get ingressclasses\nNAME CONTROLLER PARAMETERS AGE\nnginx k8s.io/ingress-nginx <none> 162m\n
If this is the only instance of the Ingress-NGINX controller, you should add the annotation ingressclass.kubernetes.io/is-default-class
in your ingress class:
kubectl -n ingress-nginx annotate ingressclasses nginx ingressclass.kubernetes.io/is-default-class=\"true\"\n
Try connecting to the Ingress controller using the NodePort from the previous step (in the range of 30000-32767)
curl <worker-external-ip>:<node-port>\n
If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it.
Deploy a small test application (httpd web server) to verify your Ingress controller.
Create the following YAML file and name it \"simple-web-server-with-ingress.yaml\":
apiVersion: v1\nkind: Namespace\nmetadata:\nname: web\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: web-server\nnamespace: web\nspec:\nselector:\nmatchLabels:\napp: web\ntemplate:\nmetadata:\nlabels:\napp: web\nspec:\ncontainers:\n- name: httpd\nimage: httpd:2.4.53-alpine\nports:\n- containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: web-server-service\nnamespace: web\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 5000\ntargetPort: 80\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: web-server-ingress\nnamespace: web\nspec:\ningressClassName: nginx\nrules:\n- host: web.example.com\nhttp:\npaths:\n- path: /\npathType: Prefix\nbackend:\nservice:\nname: web-server-service\nport:\nnumber: 5000\n
Deploy the app:
kubectl apply -f simple-web-server-with-ingress.yaml\n
Verify that you can access your application using the NodePort from step 3.
curl <worker-external-ip>:<node-port> -H 'Host: web.example.com'\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
In this example you'll install NGINX Ingress controller using LoadBalancer on k0s.
Install LoadBalancer
There are two alternatives to install LoadBalancer on k0s. Follow the links in order to install LoadBalancer.
- MetalLB as a pure SW solution running internally in the k0s cluster - Cloud provider's load balancer running outside of the k0s cluster
Verify LoadBalancer
In order to proceed you need to have a load balancer available for the Kubernetes cluster. To verify that it's available, deploy a simple load balancer service.
apiVersion: v1\nkind: Service\nmetadata:\nname: example-load-balancer\nspec:\nselector:\napp: web\nports:\n- protocol: TCP\nport: 80\ntargetPort: 80\ntype: LoadBalancer\n
kubectl apply -f example-load-balancer.yaml\n
Then run the following command to see your LoadBalancer with an external IP address.
kubectl get service example-load-balancer\n
If the LoadBalancer is not available, you won't get an IP address for EXTERNAL-IP. Instead, it's <pending>
. In this case you should go back to the previous step and check your load balancer availability.
If you are successful, you'll see a real IP address and you can proceed further.
You can delete the example-load-balancer:
kubectl delete -f example-load-balancer.yaml\n
Install NGINX Ingress Controller by following the steps in the previous chapter (step 1 to step 4).
Edit the NGINX Ingress Controller to use LoadBalancer instead of NodePort
kubectl edit service ingress-nginx-controller -n ingress-nginx\n
Find the spec.type field and change it from \"NodePort\" to \"LoadBalancer\".
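If you prefer a non-interactive change, the same edit can be applied with kubectl patch (a sketch; service and namespace names as above):
kubectl -n ingress-nginx patch service ingress-nginx-controller -p '{\"spec\": {\"type\": \"LoadBalancer\"}}'\n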
Check that you can see the ingress-nginx-controller with type LoadBalancer.
kubectl get services -n ingress-nginx\n
Try connecting to the Ingress controller
If you used private IP addresses for MetalLB in step 2, you should run the following command from the local network. Use the IP address from the previous step, column EXTERNAL-IP.
curl <EXTERNAL-IP>\n
If you don't yet have any backend service configured, you should see \"404 Not Found\" from nginx. This is ok for now. If you see a response from nginx, the Ingress Controller is running and you can reach it using LoadBalancer.
Deploy a small test application (httpd web server) to verify your Ingress.
Create the YAML file \"simple-web-server-with-ingress.yaml\" as described in the previous chapter (step 6) and deploy it.
kubectl apply -f simple-web-server-with-ingress.yaml\n
Verify that you can access your application through the LoadBalancer and Ingress controller.
curl <worker-external-ip> -H 'Host: web.example.com'\n
If you are successful, you should see <html><body><h1>It works!</h1></body></html>
.
The host network option exposes Ingress directly using the worker nodes' IP addresses. It also allows you to use ports 80 and 443. This option doesn't use any Service objects (ClusterIP, NodePort, LoadBalancer) and it has the limitation that only one Ingress controller Pod may be scheduled on each cluster node.
Download the official NGINX Ingress Controller manifests:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.3/deploy/static/provider/baremetal/deploy.yaml\n
Edit deploy.yaml. Find the Deployment ingress-nginx-controller and enable the host network option by adding the hostNetwork line:
spec:\n template:\n spec:\n hostNetwork: true\n
You can also remove the Service ingress-nginx-controller completely, because it won't be needed.
Install Ingress
kubectl apply -f deploy.yaml\n
Try to connect to the Ingress controller, deploy a test application and verify the access. These steps are similar to the previous install methods.
For more information about NGINX Ingress Controller installation, take a look at the official ingress-nginx installation guide and bare-metal considerations.
"},{"location":"examples/nginx-ingress/#alternative-examples-for-ingress-controllers-on-k0s","title":"Alternative examples for Ingress Controllers on k0s","text":"Traefik Ingress
"},{"location":"examples/rook-ceph/","title":"Installing Ceph Storage with Rook","text":"In this tutorial you'll create a Ceph storage for k0s. Ceph is a highly scalable, distributed storage solution. It offers object, block, and file storage, and it's designed to run on any common hardware. Ceph implements data replication into multiple volumes that makes it fault-tolerant. Another clear advantage of Ceph in Kubernetes is the dynamic provisioning. This means that applications just need to request the storage (persistent volume claim) and Ceph will automatically provision the requested storage without a manual creation of the persistent volume each time.
Unfortunately, the Ceph deployment as such can be considered a bit complex. To make the deployment easier, we'll use Rook operator. Rook is a CNCF project and it's dedicated to storage orchestration. Rook supports several storage solutions, but in this tutorial we will use it to manage Ceph.
This tutorial uses three worker nodes and one controller. It's possible to use fewer nodes, but using three worker nodes makes it a good example for deploying a highly available storage cluster. We use external storage partitions, which are assigned to the worker nodes to be used by Ceph.
After the Ceph deployment we'll deploy a sample application (MongoDB) to use the storage in practice.
"},{"location":"examples/rook-ceph/#prerequisites","title":"Prerequisites","text":"In this example we'll use Terraform to create four Ubuntu VMs on AWS. Using Terraform makes the VM deployment fast and repeatable. You can avoid manually setting up everything in the AWS GUI. Moreover, when you have finished with the tutorial, it's very easy to tear down the VMs with Terraform (with one command). However, you can set up the nodes in many different ways and it doesn't make a difference in the following steps.
We will use k0sctl to create the k0s cluster. The k0sctl repo also includes a ready-made Terraform configuration to create the VMs on AWS, which we'll use. Let's start by cloning the k0sctl repo.
git clone git@github.com:k0sproject/k0sctl.git\n
Take a look at the Terraform files
cd k0sctl/examples/aws-tf\nls -l\n
Open variables.tf
and set the number of controller and worker nodes like this:
variable \"cluster_name\" {\ntype = string\ndefault = \"k0sctl\"\n}\n\nvariable \"controller_count\" {\ntype = number\ndefault = 1\n}\n\nvariable \"worker_count\" {\ntype = number\ndefault = 3\n}\n\nvariable \"cluster_flavor\" {\ntype = string\ndefault = \"t3.small\"\n}\n
Open main.tf
to check or modify k0s version near the end of the file.
You can also configure a different name for your cluster and change the default VM type. t3.small
(2 vCPUs, 2 GB RAM) runs just fine for this tutorial.
For AWS, you need an account. Terraform will use the following environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN
. You can easily copy-paste them from the AWS portal. For more information, see the AWS documentation.
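For example (a sketch; the values are placeholders for your own credentials):
export AWS_ACCESS_KEY_ID=<your-access-key-id>\nexport AWS_SECRET_ACCESS_KEY=<your-secret-access-key>\nexport AWS_SESSION_TOKEN=<your-session-token> # only needed for temporary credentials\n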
When the environment variables are set, you can proceed with Terraform and deploy the VMs.
terraform init\nterraform apply\n
If you decide to create the VMs manually using the AWS GUI, you need to disable source/destination checking. This always needs to be disabled for multi-node Kubernetes clusters in order to get the node-to-node communication working due to Network Address Translation. For Terraform this is already taken care of in the default configuration.
"},{"location":"examples/rook-ceph/#3-create-and-attach-the-volumes","title":"3. Create and attach the volumes","text":"Ceph requires one of the following storage options for storing the data:
We will be using raw partitions (AWS EBS volumes), which can be easily attached to the worker node VMs. They are automatically detected by Ceph with its default configuration.
Deploy AWS EBS volumes, one for each worker node. You can manually create three EBS volumes (for example 10 GB each) using the AWS GUI and attach those to your worker nodes. Formatting shouldn't be done. Instead, Ceph handles that part automatically.
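If you prefer the command line over the GUI, the volumes can also be created and attached with the AWS CLI (a sketch; the availability zone, volume ID and instance ID are placeholders, and the step is repeated for each worker):
aws ec2 create-volume --size 10 --volume-type gp2 --availability-zone <worker-az>\naws ec2 attach-volume --volume-id <volume-id> --instance-id <worker-instance-id> --device /dev/sdf\n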
After you have attached the EBS volumes to the worker nodes, log in to one of the workers and check the available block devices:
lsblk -f\n
NAME FSTYPE LABEL UUID FSAVAIL FSUSE% MOUNTPOINT\nloop0 squashfs 0 100% /snap/amazon-ssm-agent/3552\nloop1 squashfs 0 100% /snap/core18/1997\nloop2 squashfs 0 100% /snap/snapd/11588\nloop3 squashfs 0 100% /snap/lxd/19647\nnvme0n1\n\u2514\u2500nvme0n1p1 ext4 cloudimg-rootfs e8070c31-bfee-4314-a151-d1332dc23486 5.1G 33% /\nnvme1n1\n
The last line (nvme1n1) in this example printout corresponds to the attached EBS volume. Note that it doesn't have any filesystem (FSTYPE is empty). This meets the Ceph storage requirements and you are good to proceed.
"},{"location":"examples/rook-ceph/#4-install-k0s-using-k0sctl","title":"4. Install k0s using k0sctl","text":"You can use terraform to automatically output a config file for k0sctl with the ip addresses and access details.
terraform output -raw k0s_cluster > k0sctl.yaml\n
After that deploying k0s becomes very easy with the ready-made configuration.
k0sctl apply --config k0sctl.yaml\n
It might take around 2-3 minutes for k0sctl to connect each node, install k0s and connect the nodes together to form a cluster.
"},{"location":"examples/rook-ceph/#5-access-k0s-cluster","title":"5. Access k0s cluster","text":"To access your new cluster remotely, you can use k0sctl to fetch kubeconfig and use that with kubectl or Lens.
k0sctl kubeconfig --config k0sctl.yaml > kubeconfig\nexport KUBECONFIG=$PWD/kubeconfig\nkubectl get nodes\n
The other option is to login to your controller node and use the k0s in-built kubectl to access the cluster. Then you don't need to worry about kubeconfig (k0s takes care of that automatically).
ssh -i aws.pem <username>@<ip-address>\nsudo k0s kubectl get nodes\n
"},{"location":"examples/rook-ceph/#6-deploy-rook","title":"6. Deploy Rook","text":"To get started with Rook, let's first clone the Rook GitHub repo:
git clone --single-branch --branch release-1.7 https://github.com/rook/rook.git\ncd rook/cluster/examples/kubernetes/ceph\n
We will mostly use the default Rook configuration. However, the k0s kubelet directory must be configured in operator.yaml
like this
ROOK_CSI_KUBELET_DIR_PATH: \"/var/lib/k0s/kubelet\"\n
To create the resources needed by the Rook Ceph operator, run
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml\n
Now you should see the operator running. Check it with
kubectl get pods -n rook-ceph\n
"},{"location":"examples/rook-ceph/#7-deploy-ceph-cluster","title":"7. Deploy Ceph Cluster","text":"Then you can proceed to create a Ceph cluster. Ceph will use the three EBS volumes attached to the worker nodes:
kubectl apply -f cluster.yaml\n
It takes a few minutes to prepare the volumes and create the cluster. Once this is completed, you should see the following output:
kubectl get pods -n rook-ceph\n
NAME READY STATUS RESTARTS AGE\ncsi-cephfsplugin-nhxc8 3/3 Running 0 2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-ldhjp 6/6 Running 0 2m48s\ncsi-cephfsplugin-provisioner-db45f85f5-sxfm8 6/6 Running 0 2m48s\ncsi-cephfsplugin-tj2bh 3/3 Running 0 2m48s\ncsi-cephfsplugin-z2rrl 3/3 Running 0 2m48s\ncsi-rbdplugin-5q7gq 3/3 Running 0 2m49s\ncsi-rbdplugin-8sfpd 3/3 Running 0 2m49s\ncsi-rbdplugin-f2xdz 3/3 Running 0 2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-g6vck 6/6 Running 0 2m49s\ncsi-rbdplugin-provisioner-d85cbdb48-zpmvr 6/6 Running 0 2m49s\nrook-ceph-crashcollector-ip-172-31-0-76-64cb4c7775-m55x2 1/1 Running 0 45s\nrook-ceph-crashcollector-ip-172-31-13-183-654b46588d-djqsd 1/1 Running 0 2m57s\nrook-ceph-crashcollector-ip-172-31-15-5-67b68698f-gcjb7 1/1 Running 0 2m46s\nrook-ceph-mgr-a-5ffc65c874-8pxgv 1/1 Running 0 58s\nrook-ceph-mon-a-ffcd85c5f-z89tb 1/1 Running 0 2m59s\nrook-ceph-mon-b-fc8f59464-lgczk 1/1 Running 0 2m46s\nrook-ceph-mon-c-69bd87b558-kl4nl 1/1 Running 0 91s\nrook-ceph-operator-54cf7487d4-pl66p 1/1 Running 0 4m57s\nrook-ceph-osd-0-dd4fd8f6-g6s9m 1/1 Running 0 48s\nrook-ceph-osd-1-7c478c49c4-gkqml 1/1 Running 0 47s\nrook-ceph-osd-2-5b887995fd-26492 1/1 Running 0 46s\nrook-ceph-osd-prepare-ip-172-31-0-76-6b5fw 0/1 Completed 0 28s\nrook-ceph-osd-prepare-ip-172-31-13-183-cnkf9 0/1 Completed 0 25s\nrook-ceph-osd-prepare-ip-172-31-15-5-qc6pt 0/1 Completed 0 23s\n
"},{"location":"examples/rook-ceph/#8-configure-ceph-block-storage","title":"8. Configure Ceph block storage","text":"Before Ceph can provide storage to your cluster, you need to create a ReplicaPool and a StorageClass. In this example, we use the default configuration to create the block storage.
kubectl apply -f ./csi/rbd/storageclass.yaml\n
"},{"location":"examples/rook-ceph/#9-request-storage","title":"9. Request storage","text":"Create a new manifest file mongo-pvc.yaml
with the following content:
apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\nname: mongo-pvc\nspec:\nstorageClassName: rook-ceph-block\naccessModes:\n- ReadWriteOnce\nresources:\nrequests:\nstorage: 2Gi\n
This will create a Persistent Volume Claim (PVC) to request 2 GB of block storage from Ceph. Provisioning will be done dynamically. You can define the block size freely as long as it fits within the available storage.
kubectl apply -f mongo-pvc.yaml\n
You can now check the status of your PVC:
kubectl get pvc\n
When the PVC gets the requested volume reserved (bound), it should look like this:
kubectl get pvc\n
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE\nmongo-pvc Bound pvc-08337736-65dd-49d2-938c-8197a8871739 2Gi RWO rook-ceph-block 6s\n
"},{"location":"examples/rook-ceph/#10-deploy-an-example-application","title":"10. Deploy an example application","text":"Let's deploy a Mongo database to verify the Ceph storage. Create a new file mongo.yaml
with the following content:
apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: mongo\nspec:\nselector:\nmatchLabels:\napp: mongo\ntemplate:\nmetadata:\nlabels:\napp: mongo\nspec:\ncontainers:\n- image: mongo:4.0\nname: mongo\nports:\n- containerPort: 27017\nname: mongo\nvolumeMounts:\n- name: mongo-persistent-storage\nmountPath: /data/db\nvolumes:\n- name: mongo-persistent-storage\npersistentVolumeClaim:\nclaimName: mongo-pvc\n
Deploy the database:
kubectl apply -f mongo.yaml\n
"},{"location":"examples/rook-ceph/#11-access-the-application","title":"11. Access the application","text":"Open the MongoDB shell using the mongo pod:
kubectl get pods\n
NAME READY STATUS RESTARTS AGE\nmongo-b87cbd5cc-4wx8t 1/1 Running 0 76s\n
kubectl exec -it mongo-b87cbd5cc-4wx8t -- mongo\n
Create a DB and insert some data:
> use testDB\nswitched to db testDB\n> db.testDB.insertOne( {name: \"abc\", number: 123 })\n{\n \"acknowledged\" : true,\n \"insertedId\" : ObjectId(\"60815690a709d344f83b651d\")\n}\n> db.testDB.insertOne( {name: \"bcd\", number: 234 })\n{\n \"acknowledged\" : true,\n \"insertedId\" : ObjectId(\"6081569da709d344f83b651e\")\n}\n
Read the data:
> db.getCollection(\"testDB\").find()\n{ \"_id\" : ObjectId(\"60815690a709d344f83b651d\"), \"name\" : \"abc\", \"number\" : 123 }\n{ \"_id\" : ObjectId(\"6081569da709d344f83b651e\"), \"name\" : \"bcd\", \"number\" : 234 }\n>\n
You can also try to restart the mongo pod or restart the worker nodes to verify that the storage is persistent.
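A minimal sketch of such a check (the pod name is the one from the listing above; after a restart the replacement pod will have a different name):
kubectl delete pod mongo-b87cbd5cc-4wx8t\nkubectl get pods # wait until the replacement mongo pod is Running\nkubectl exec -it <new-mongo-pod> -- mongo --eval 'db.getSiblingDB(\"testDB\").testDB.find()'\n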
"},{"location":"examples/rook-ceph/#12-clean-up","title":"12. Clean-up","text":"You can use Terraform to take down the VMs:
terraform destroy\n
Remember to delete the EBS volumes separately.
"},{"location":"examples/rook-ceph/#conclusions","title":"Conclusions","text":"You have now created a replicated Ceph storage for k0s. All you data is stored to multiple disks at the same time so you have a fault-tolerant solution. You also have enabled dynamic provisioning. Your applications can request the available storage without a manual creation of the persistent volumes each time.
This was just one example of deploying distributed storage to a k0s cluster using an operator. You can easily use different Kubernetes storage solutions with k0s.
"},{"location":"examples/traefik-ingress/","title":"Installing Traefik Ingress Controller","text":"You can configure k0s with the Traefik ingress controller, a MetalLB service loadbalancer, and deploy the Traefik Dashboard using a service sample. To do this you leverage Helm's extensible bootstrapping functionality to add the correct extensions to the k0s.yaml
file during cluster configuration.
Configure k0s to install Traefik and MetalLB during cluster bootstrapping by adding their Helm charts as extensions in the k0s configuration file (k0s.yaml
).
Note:
A good practice is to have a small range of IP addresses that are addressable on your network, preferably outside the assignment pool your DHCP server allocates (though any valid IP range should work locally on your machine). Providing an addressable range allows you to access your load balancer and Ingress services from anywhere on your local network.
extensions:\nhelm:\nrepositories:\n- name: traefik\nurl: https://traefik.github.io/charts\n- name: bitnami\nurl: https://charts.bitnami.com/bitnami\ncharts:\n- name: traefik\nchartname: traefik/traefik\nversion: \"20.5.3\"\nnamespace: default\n- name: metallb\nchartname: bitnami/metallb\nversion: \"2.5.4\"\nnamespace: default\nvalues: |\nconfigInline:\naddress-pools:\n- name: generic-cluster-pool\nprotocol: layer2\naddresses:\n- 192.168.0.5-192.168.0.10\n
"},{"location":"examples/traefik-ingress/#2-retrieve-the-load-balancer-ip","title":"2. Retrieve the Load Balancer IP","text":"After you start your cluster, run kubectl get all
to confirm the deployment of Traefik and MetalLB. The command should return a response with the metallb
and traefik
resources, along with a service load balancer that has an assigned EXTERNAL-IP
.
kubectl get all\n
Output:
NAME READY STATUS RESTARTS AGE\npod/metallb-1607085578-controller-864c9757f6-bpx6r 1/1 Running 0 81s\npod/metallb-1607085578-speaker-245c2 1/1 Running 0 60s\npod/traefik-1607085579-77bbc57699-b2f2t 1/1 Running 0 81s\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 96s\nservice/traefik-1607085579 LoadBalancer 10.105.119.102 192.168.0.5 80:32153/TCP,443:30791/TCP 84s\n\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\ndaemonset.apps/metallb-1607085578-speaker 1 1 1 1 1 kubernetes.io/os=linux 87s\n\nNAME READY UP-TO-DATE AVAILABLE AGE\ndeployment.apps/metallb-1607085578-controller 1/1 1 1 87s\ndeployment.apps/traefik-1607085579 1/1 1 1 84s\n\nNAME DESIRED CURRENT READY AGE\nreplicaset.apps/metallb-1607085578-controller-864c9757f6 1 1 1 81s\nreplicaset.apps/traefik-1607085579-77bbc57699 1 1 1 81s\n
Take note of the EXTERNAL-IP
given to the service/traefik-n
load balancer. In this example, 192.168.0.5
has been assigned and can be used to access services via the Ingress proxy:
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/traefik-1607085579 LoadBalancer 10.105.119.102 192.168.0.5 80:32153/TCP,443:30791/TCP 84s\n
Receiving a 404 response here is normal, as you've not configured any Ingress resources to respond yet:
curl http://192.168.0.5\n
404 page not found\n
"},{"location":"examples/traefik-ingress/#3-deploy-and-access-the-traefik-dashboard","title":"3. Deploy and access the Traefik Dashboard","text":"With an available and addressable load balancer present on your cluster, now you can quickly deploy the Traefik dashboard and access it from anywhere on your LAN (assuming that MetalLB is configured with an addressable range).
Create the Traefik Dashboard IngressRoute in a YAML file:
apiVersion: traefik.containo.us/v1alpha1\nkind: IngressRoute\nmetadata:\nname: dashboard\nspec:\nentryPoints:\n- web\nroutes:\n- match: PathPrefix(`/dashboard`) || PathPrefix(`/api`)\nkind: Rule\nservices:\n- name: api@internal\nkind: TraefikService\n
Deploy the resource:
kubectl apply -f traefik-dashboard.yaml\n
Output:
ingressroute.traefik.containo.us/dashboard created\n
At this point you should be able to access the dashboard using the EXTERNAL-IP
that you noted above by visiting http://192.168.0.5/dashboard/
in your browser.
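You can also verify from the command line that the dashboard responds (a sketch; the IP is the EXTERNAL-IP noted above):
curl -sI http://192.168.0.5/dashboard/\n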
Create a simple whoami
Deployment, Service, and Ingress manifest:
apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: whoami-deployment\nspec:\nreplicas: 1\nselector:\nmatchLabels:\napp: whoami\ntemplate:\nmetadata:\nlabels:\napp: whoami\nspec:\ncontainers:\n- name: whoami-container\nimage: containous/whoami\n---\napiVersion: v1\nkind: Service\nmetadata:\nname: whoami-service\nspec:\nports:\n- name: http\ntargetPort: 80\nport: 80\nselector:\napp: whoami\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: whoami-ingress\nspec:\nrules:\n- http:\npaths:\n- path: /whoami\npathType: Exact\nbackend:\nservice:\nname: whoami-service\nport:\nnumber: 80\n
Apply the manifests:
kubectl apply -f whoami.yaml\n
Output:
deployment.apps/whoami-deployment created\nservice/whoami-service created\ningress.networking.k8s.io/whoami-ingress created\n
Test the ingress and service:
curl http://192.168.0.5/whoami\n
Output:
Hostname: whoami-deployment-85bfbd48f-7l77c\nIP: 127.0.0.1\nIP: ::1\nIP: 10.244.214.198\nIP: fe80::b049:f8ff:fe77:3e64\nRemoteAddr: 10.244.214.196:34858\nGET /whoami HTTP/1.1\nHost: 192.168.0.5\nUser-Agent: curl/7.68.0\nAccept: */*\nAccept-Encoding: gzip\nX-Forwarded-For: 192.168.0.82\nX-Forwarded-Host: 192.168.0.5\nX-Forwarded-Port: 80\nX-Forwarded-Proto: http\nX-Forwarded-Server: traefik-1607085579-77bbc57699-b2f2t\nX-Real-Ip: 192.168.0.82\n
With the Traefik Ingress Controller it is possible to use third-party tools, such as ngrok, to go further and expose your load balancer to the world. In doing so, you enable dynamic certificate provisioning through Let's Encrypt, using either cert-manager or Traefik's own built-in ACME provider.
"},{"location":"examples/oidc/oidc-cluster-configuration/","title":"OpenID Connect integration","text":"Developers use kubectl
to access Kubernetes clusters. By default kubectl
uses a certificate to authenticate to the Kubernetes API. This means that when multiple developers need to access a cluster, the certificate needs to be shared. Sharing the credentials to access a Kubernetes cluster presents a significant security problem: a shared certificate is easy to compromise, and the consequences can be catastrophic.
In this tutorial, we walk through how to set up your Kubernetes cluster to add Single Sign-On support for kubectl using OpenID Connect (OIDC).
"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authentication","title":"OpenID Connect based authentication","text":"OpenID Connect can be enabled by modifying k0s configuration (using extraArgs).
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuring-k0s-overview","title":"Configuring k0s: overview","text":"There are list of arguments for the kube-api that allows us to manage OIDC based authentication
Parameter Description Example Required--oidc-issuer-url
URL of the provider which allows the API server to discover public signing keys. Only URLs which use the https://
scheme are accepted. This is typically the provider's discovery URL without a path, for example \"https://accounts.google.com\" or \"https://login.salesforce.com\". This URL should point to the level below .well-known/openid-configuration If the discovery URL is https://accounts.google.com/.well-known/openid-configuration
, the value should be https://accounts.google.com
Yes --oidc-client-id
A client id that all tokens must be issued for. kubernetes Yes --oidc-username-claim
JWT claim to use as the user name. By default sub
, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as email
or name
, depending on their provider. However, claims other than email
will be prefixed with the issuer URL to prevent naming clashes with other plugins. sub No --oidc-username-prefix
Prefix prepended to username claims to prevent clashes with existing names (such as system:
users). For example, the value oidc:
will create usernames like oidc:jane.doe
. If this flag isn't provided and --oidc-username-claim
is a value other than email
the prefix defaults to ( Issuer URL )#
where ( Issuer URL )
is the value of --oidc-issuer-url
. The value -
can be used to disable all prefixing. oidc:
No --oidc-groups-claim
JWT claim to use as the user's group. If the claim is present it must be an array of strings. groups No --oidc-groups-prefix
Prefix prepended to group claims to prevent clashes with existing names (such as system:
groups). For example, the value oidc:
will create group names like oidc:engineering
and oidc:infra
. oidc:
No --oidc-required-claim
A key=value pair that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. Repeat this flag to specify multiple claims. claim=value
No --oidc-ca-file
The path to the certificate for the CA that signed your identity provider's web certificate. Defaults to the host's root CAs. /etc/kubernetes/ssl/kc-ca.pem
No To set up a bare minimum example we need to use:
You will require:
Please refer to the providers configuration guide or your selected OIDC provider's own documentation (we don't cover all of them in the k0s docs).
"},{"location":"examples/oidc/oidc-cluster-configuration/#configuration-example","title":"Configuration example","text":"apiVersion: k0s.k0sproject.io/v1beta1\nkind: ClusterConfig\nspec:\napi:\nextraArgs:\noidc-issuer-url: <issuer-url>\noidc-client-id: <client-id>\noidc-username-claim: email # we use email token claim field as a username\n
Use the configuration as a starting point. Continue with the configuration guide to finish the k0s cluster installation.
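For example, a minimal sketch of finishing the installation with this configuration (assuming the snippet above has been saved as /etc/k0s/k0s.yaml on the controller):
# install and start the controller using the OIDC-enabled configuration\nsudo k0s install controller -c /etc/k0s/k0s.yaml\nsudo k0s start\n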
"},{"location":"examples/oidc/oidc-cluster-configuration/#openid-connect-based-authorisation","title":"OpenID Connect based authorisation","text":"There are two alternative options to implement authorization
"},{"location":"examples/oidc/oidc-cluster-configuration/#provider-based-role-mapping","title":"Provider based role mapping","text":"Please refer to the providers configuration guide. Generally speaking, using the oidc-groups-claim
argument lets you specify which token claim is used as a list of RBAC roles for a given user. You still need to somehow sync that data between your OIDC provider and the kube-api RBAC system.
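As an illustrative sketch of the cluster side of that mapping (the group name oidc:engineering and the built-in edit role are placeholders, not something configured elsewhere in this guide), a provider-side group can be bound to a role like this:
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: oidc-engineering-edit\nsubjects:\n- kind: Group\n  # the value of the groups claim, including any --oidc-groups-prefix\n  name: oidc:engineering\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: ClusterRole\n  name: edit\n  apiGroup: rbac.authorization.k8s.io\nEOF\n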
To use manual role management, you will need to create a role and a role binding for each new user within the k0s cluster. The role can be shared by all users. Role example:
---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nnamespace: default\nname: dev-role\nrules:\n- apiGroups: [\"*\"]\nresources: [\"*\"]\nverbs: [\"*\"]\n
RoleBinding example:
kind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: dev-role-binding\nsubjects:\n- kind: User\nname: <provider side user id>\nroleRef:\nkind: Role\nname: dev-role\napiGroup: rbac.authorization.k8s.io\n
The provided Role example is all-inclusive, granting every permission, and should be tuned to your actual requirements.
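For instance, a more restrictive variant (a sketch only, with placeholder resource names) might limit the user to read access on common workload resources:
kubectl apply -f - <<EOF\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  namespace: default\n  name: dev-read-only\nrules:\n- apiGroups: [\"\", \"apps\"]\n  resources: [\"pods\", \"services\", \"deployments\"]\n  verbs: [\"get\", \"list\", \"watch\"]\nEOF\n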
"},{"location":"examples/oidc/oidc-cluster-configuration/#kubeconfig-management","title":"kubeconfig management","text":"NB: it's not safe to provide full content of the /var/lib/k0s/pki/admin.conf
to the end-user. Instead, create a user specific kubeconfig with limited permissions.
The authorization side of kubeconfig management is described in the provider-specific guides. Use /var/lib/k0s/pki/admin.conf
as a template for a cluster-specific kubeconfig.
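A rough sketch of that approach (file and user entry names below are placeholders; the actual OIDC user entry comes from your provider guide):
# extract a self-contained copy of the cluster definition from the admin template\nsudo kubectl config view --raw --flatten --minify --kubeconfig /var/lib/k0s/pki/admin.conf > developer.kubeconfig\n# then remove the embedded admin user entry from developer.kubeconfig and add an OIDC user\n# (for example with kubectl config set-credentials / kubelogin, as shown in the provider guides)\n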
OAuth2 spec Kubernetes authorization system (RBAC) Kubernetes authenticating system
"},{"location":"examples/oidc/oidc-provider-configuration/","title":"Providers","text":"We use Google Cloud as a provider for the sake of the example. Check your vendor documentation in case if you use some other vendor.
"},{"location":"examples/oidc/oidc-provider-configuration/#notes-on-stand-alone-providers","title":"Notes on stand-alone providers","text":"If you are using stand-alone OIDC provider, you might need to specify oidc-ca-file
argument for the kube-api.
We use the k8s-oidc-helper tool to create a proper kubeconfig user record.
The issuer URL for Google Cloud is https://accounts.google.com
Use the command and follow the instructions:
k8s-oidc-helper --client-id=<CLIENT_ID> \\\n--client-secret=<CLIENT_SECRET> \\\n--write=true\n
"},{"location":"examples/oidc/oidc-provider-configuration/#using-kubelogin","title":"Using kubelogin","text":"For other OIDC providers it is possible to use kubelogin
plugin. Please refer to the setup guide for details.
kubelogin
","text":"kubectl oidc-login setup \\\n--oidc-issuer-url=https://accounts.google.com \\\n--oidc-client-id=<CLIENT_ID> \\\n--oidc-client-secret=<CLIENT_SECRET>\n\n kubectl config set-credentials oidc \\\n--exec-api-version=client.authentication.k8s.io/v1beta1 \\\n--exec-command=kubectl \\\n--exec-arg=oidc-login \\\n--exec-arg=get-token \\\n--exec-arg=--oidc-issuer-url=https://accounts.google.com \\\n--exec-arg=--oidc-client-id=<CLIENT_ID> \\\n--exec-arg=--oidc-client-secret=<CLIENT_SECRET>\n
You can switch the current context to oidc.
kubectl config set-context --current --user=oidc
We use mkdocs and mike for publishing docs to docs.k0sproject.io. This guide provides a simple how-to for configuring and deploying newly added docs to our website.
"},{"location":"internal/publishing_docs_using_mkdocs/#requirements","title":"Requirements","text":"Install mike: https://github.com/jimporter/mike#installation
"},{"location":"internal/publishing_docs_using_mkdocs/#adding-a-new-link-to-the-navigation","title":"Adding A New link to the Navigation","text":"docs
directory (i.e., changes to the main README.md
are not reflected in the website).nav
in the main mkdocs.yml file:nav:\n- Overview: README.md\n- Creating A Cluster:\n- Quick Start Guide: create-cluster.md\n- Run in Docker: k0s-in-docker.md\n- Single node set-up: k0s-single-node.md\n- Configuration Reference:\n- Architecture: architecture.md\n- Networking: networking.md\n- Configuration Options: configuration.md\n- Using Cloud Providers: cloud-providers.md\n- Running k0s with Traefik: examples/traefik-ingress.md\n- Running k0s as a service: install.md\n- k0s CLI Help Pages: cli/k0s.md\n- Deploying Manifests: manifests.md\n- FAQ: FAQ.md\n- Troubleshooting: troubleshooting.md\n- Contributing:\n- Overview: contributors/overview.md\n- Workflow: contributors/github_workflow.md\n- Testing: contributors/testing.md\n
main
, the \"Publish Docs\" jos will start running: https://github.com/k0sproject/k0s/actions?query=workflow%3A%22Publish+docs+via+GitHub+Pages%22gh-pages
deployment page: https://github.com/k0sproject/k0s/deployments/activity_log?environment=github-pagesWe've got a dockerized setup for easily testing docs locally. Simply run make docs-serve-dev
. The docs will be available on http://localhost:8000.
Note If you have something already running locally on port 8000
you can choose another port like so: make docs-serve-dev DOCS_DEV_PORT=9999
. The docs will then be available on http://localhost:9999.
k0s bundles Kubernetes manifests for Calico. The manifests are retrieved from the official Calico repo.
As fetching and modifying the entire multi-thousand line file is error-prone, you may follow these steps to upgrade Calico to the latest version:
./hack/get-calico.sh <version>
make bindata-manifests
Note: All manual adjustments should be fairly obvious from the git diff. This section attempts to provide a sanity checklist to go through and make sure we still have those changes applied. The code blocks in this section are our modifications, not the Calico originals.
To see the diff without CRDs, you can do something like:
git diff ':!static/manifests/calico/CustomResourceDefinition'\n
That'll make it easier to spot any needed changes.
static/manifests/calico/DaemonSet/calico-node.yaml
:
ipip
to find):{{- if eq .Mode \"ipip\" }}\n# Enable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: {{ .Overlay }}\n# Enable or Disable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: \"Never\"\n{{- else if eq .Mode \"vxlan\" }}\n# Disable IPIP\n- name: CALICO_IPV4POOL_IPIP\nvalue: \"Never\"\n# Enable VXLAN on the default IP pool.\n- name: CALICO_IPV4POOL_VXLAN\nvalue: {{ .Overlay }}\n- name: FELIX_VXLANPORT\nvalue: \"{{ .VxlanPort }}\"\n- name: FELIX_VXLANVNI\nvalue: \"{{ .VxlanVNI }}\"\n{{- end }}\n
# Auto detect the iptables backend\n- name: FELIX_IPTABLESBACKEND\nvalue: \"auto\"\n
{{- if .EnableWireguard }}\n- name: FELIX_WIREGUARDENABLED\nvalue: \"true\"\n{{- end }}\n
- name: CALICO_IPV4POOL_CIDR\nvalue: \"{{ .ClusterCIDR }}\"\n
# calico-config.yaml\ncalico_backend: \"{{ .Mode }}\"\nveth_mtu: \"{{ .MTU }}\"\n
CLUSTER_TYPE
- name: CLUSTER_TYPE\nvalue: \"k8s\"\n
-bird-ready
and -bird-live
from the readiness and liveness probes respectivelyInstead of hardcoded image names and versions use placeholders to support configuration level settings. Following placeholders are used:
CalicoCNIImage
for calico/cniCalicoNodeImage
for calico/nodeCalicoKubeControllersImage
for calico/kube-controllersAlso, all containers in manifests were modified to have 'imagePullPolicy' field:
imagePullPolicy: {{ .PullPolicy }}\n
Example:
# calico-node.yaml\nimage: {{ .CalicoCNIImage }}\n
"}]}
\ No newline at end of file
diff --git a/head/selinux/index.html b/head/selinux/index.html
index ef87e08dc785..05b31572772f 100644
--- a/head/selinux/index.html
+++ b/head/selinux/index.html
@@ -20,8 +20,9 @@
+
-
+
@@ -29,15 +30,18 @@
-
+
-
+
+
+
+
@@ -74,7 +78,7 @@
-
+
@@ -124,6 +128,7 @@
Controller nodes are isolated by default, which means that a cluster user cannot schedule workloads onto controller nodes.
+Controller nodes are isolated by default, which means that a cluster user cannot schedule workloads onto controller nodes.
k0s provides a mechanism to expose system components for monitoring. System component metrics can give a better look into what is happening inside them. Metrics are particularly useful for building dashboards and alerts. You can read more about metrics for Kubernetes system components here.
Note: the mechanism is an opt-in feature; you can enable it on installation:
-```shell
-sudo k0s install controller --enable-metrics-scraper
-```
+sudo k0s install controller --enable-metrics-scraper
Jobs#
The list of components which are scraped by k0s:
- kube-scheduler
- kube-controller-manager
+- etcd
+- kine
Note: kube-apiserver metrics are not scraped since they are accessible via the kubernetes
endpoint within the cluster.
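For example (a quick sketch, not part of the scraper itself), you can read the kube-apiserver metrics directly through the API with a raw request:
kubectl get --raw /metrics | head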
Architecture#
@@ -1518,7 +1926,7 @@ Architecture
-
+
@@ -1526,7 +1934,7 @@ Architecture
-
+
@@ -1534,7 +1942,7 @@ Architecture
-
+
@@ -1542,7 +1950,7 @@ Architecture
-
+
@@ -1550,7 +1958,7 @@ Architecture
-
+
spec:
k0s:
- version: 1.27.5+k0s.0
+ version: 1.28.1+k0s.0
If you do not specify a version, k0sctl checks online for the latest version and defaults to it.
k0sctl apply
@@ -1549,7 +1957,7 @@ k0sctl cluster upgrade process[0027] [ssh] 10.0.0.17:22: upgrade successful
INFO[0027] ==> Running phase: Disconnect from hosts
INFO[0027] ==> Finished in 27s
-INFO[0027] k0s cluster version 1.27.5+k0s.0 is now installed
+INFO[0027] k0s cluster version 1.28.1+k0s.0 is now installed
INFO[0027] Tip: To access the cluster you can now fetch the admin kubeconfig using:
INFO[0027] k0sctl kubeconfig
kubectl get node --show-labels
NAME STATUS ROLES AGE VERSION LABELS
-worker0 NotReady <none> 10s v1.27.5+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux
+worker0 NotReady <none> 10s v1.28.1+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,k0sproject.io/foo=bar,k0sproject.io/other=xyz,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker0,kubernetes.io/os=linux
Controller worker nodes are assigned node.k0sproject.io/role=control-plane
and node-role.kubernetes.io/control-plane=true
labels:
kubectl get node --show-labels
NAME STATUS ROLES AGE VERSION LABELS
-controller0 NotReady control-plane 10s v1.27.5+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true
+controller0 NotReady control-plane 10s v1.28.1+k0s beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=worker0,kubernetes.io/os=linux,node.k0sproject.io/role=control-plane,node-role.kubernetes.io/control-plane=true
Note: Setting the labels is only effective on the first registration of the node. Changing the labels thereafter has no effect.
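As a sketch of how such labels end up on the node in the first place (the token file path below is a placeholder), they are passed when the worker first registers:
sudo k0s worker --token-file /path/to/token --labels="k0sproject.io/foo=bar,k0sproject.io/other=xyz"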