en.search-data.min.9d28f3fcf7b2072d0402f9867df77bbf3f93ffd88c25685d867d6632a1c2ff25.js
'use strict';(function(){const indexCfg={cache:true};indexCfg.doc={id:'id',field:['title','content'],store:['title','href'],};const index=FlexSearch.create('balance',indexCfg);window.bookSearchIndex=index;index.add({'id':0,'href':'/documentation/','title':"Documentation",'content':"Flexkube documentation "});index.add({'id':1,'href':'/documentation/concepts/','title':"Concepts",'content':"Concepts "});index.add({'id':2,'href':'/documentation/concepts/managing-certificates/','title':"Managing Certificates",'content':""});index.add({'id':3,'href':'/documentation/concepts/managing-containers/','title':"Managing Containers",'content':"Managing containers This document should explain how libflexkube manages the containers.\n"});index.add({'id':4,'href':'/documentation/concepts/self-hosted-kubernetes-controlplane/','title':"Self Hosted Kubernetes Controlplane",'content':"Self-hosted Kubernetes controlplane This document should describe why Flexkube uses and recommends using self-hosted Kubernetes controlplane, how it works etc.\n"});index.add({'id':5,'href':'/documentation/concepts/supported-container-runtimes/','title':"Supported Container Runtimes",'content':"Supported container runtimes This document should explain how libflexkube utilizes different container runtimes and link to existing implementations. Also, it should describe what other possible container runtimes could be added.\n"});index.add({'id':6,'href':'/documentation/concepts/supported-container-runtimes/docker/','title':"Docker",'content':""});index.add({'id':7,'href':'/documentation/concepts/supported-transport-protocols/','title':"Supported Transport Protocols",'content':"Supported transport protocols This document should explain what transport protocols are in libflexkube, how they are used and should link to all implementations.\n"});index.add({'id':8,'href':'/documentation/concepts/supported-transport-protocols/direct/','title':"Direct",'content':"Direct transport protocol This document should explain how the direct transport protocol works.\n"});index.add({'id':9,'href':'/documentation/concepts/supported-transport-protocols/ssh/','title':"Ssh",'content':"SSH transport protocol This document should explain how the SSH transport protocol works.\n"});index.add({'id':10,'href':'/documentation/getting-started/','title':"Getting Started",'content':"Getting started This section includes some basic information on how to start using the project, how to download release binaries, what the requirements are etc.\n"});index.add({'id':11,'href':'/documentation/getting-started/installing/','title':"Installing",'content':"Installing Depending on how you want to use Flexkube, see the appropriate installing section:\n CLI for flexkube CLI users Terraform for Terraform users "});index.add({'id':12,'href':'/documentation/getting-started/installing/cli/','title':"Cli",'content':"Flexkube CLI Download the pre-built binary The easiest way to get the Flexkube CLI is to use one of the pre-built release binaries which are available for macOS and Linux.\nSee the GitHub Releases page to find the latest available release.\nFor example, to download version v0.3.0 on Linux, execute the following command:\nVERSION=v0.3.0 wget -O- https://github.com/flexkube/libflexkube/releases/download/${VERSION}/flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - It will download the flexkube binary into your current directory. It is recommended to move this binary into one of the directories mentioned in your $PATH environment variable, e.g. 
to ~/.local/bin or /usr/local/bin, to make it easy to access.\nBuilding from source For building from source, make sure you have the go and git binaries available on your system.\nUsing go get You can install Flexkube CLI from source using the following command:\ngo get github.com/flexkube/libflexkube/cmd/flexkube Once done, make sure your Go binary path is included in $PATH, so the binary is accessible for execution.\nUsing git and go build To build Flexkube CLI from source, first clone the libflexkube repository. This can be done using the following command:\ngit clone https://github.com/flexkube/libflexkube.git \u0026amp;\u0026amp; cd libflexkube Then, to build the flexkube binary, run the following command:\ngo build ./cmd/flexkube It will build the flexkube binary into your current directory. It is recommended to move this binary into one of the directories mentioned in your $PATH environment variable, e.g. to ~/.local/bin or /usr/local/bin, to make it easy to access.\n"});index.add({'id':13,'href':'/documentation/getting-started/installing/go/','title':"Go",'content':"Go module This document should describe how to include libflexkube Go packages as part of your Go project.\n"});index.add({'id':14,'href':'/documentation/getting-started/installing/terraform/','title':"Terraform",'content':"Terraform provider Download the pre-built binary The easiest way to get the Flexkube Terraform provider is to use one of the pre-built release binaries which are available for macOS and Linux.\nSee the GitHub Releases page to find the latest available release.\nFor example, to download version v0.2.2 on Linux, execute the following command:\nVERSION=v0.2.2 wget -qO- https://github.com/flexkube/libflexkube/releases/download/$VERSION/terraform-provider-flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - terraform-provider-flexkube_${VERSION}_x4 It will download the terraform-provider-flexkube binary into your current directory. If you have your Terraform code in the same directory, you can start using it right away, e.g. with the terraform init command.\nIf you want the provider to be available in other directories, it is recommended to move the binary into the ~/.terraform.d/plugins/ directory. This can be done using the following command:\nmkdir -p ~/.terraform.d/plugins/ \u0026amp;\u0026amp; mv terraform-provider-flexkube_v0.2.2_x4 ~/.terraform.d/plugins/ Building from source For building from source, make sure you have the go and git binaries available on your system.\nUsing go get You can install the Flexkube Terraform Provider from source using the following command:\ngo get github.com/flexkube/libflexkube/cmd/terraform-provider-flexkube Once done, it is recommended to move the binary into the ~/.terraform.d/plugins/ directory to make it available for all Terraform environments:\nmkdir -p ~/.terraform.d/plugins/ \u0026amp;\u0026amp; mv $(go env GOPATH)/bin/terraform-provider-flexkube ~/.terraform.d/plugins/terraform-provider-flexkube_v0.2.2_x4 Using git and go build To build the Terraform provider from source, first clone the libflexkube repository. 
This can be done using the following command:\ngit clone https://github.com/flexkube/libflexkube.git \u0026amp;\u0026amp; cd libflexkube Then, to build Terraform Provider binary, run the following command:\ngo build ./cmd/terraform-provider-flexkube Once done, it is recommended to move the binary into ~/.terraform.d/plugins/ directory to make it available for all Terraform environments:\nmkdir -p ~/.terraform.d/plugins/ \u0026amp;\u0026amp; mv $(go env GOPATH)/bin/terraform-provider-flexkube ~/.terraform.d/plugins/terraform-provider-flexkube "});index.add({'id':15,'href':'/documentation/getting-started/requirements/','title':"Requirements",'content':"Requirements This section describes various requirements of Flexkube.\nIt is recommended to deploy Flexkube resources (e.g. etcd, kubelet) into dedicated machine, not into local host, as resources will write to some hosts locations like /etc/kubernetes, /var/lib/kubelet or /var/lib/etcd to persist the cluster state across updates. See TODO section to learn how to create VM for testing. Summary Short summary of the requirements for each machine where Kubernetes will be deployed:\n Minimum 2 GB of RAM SSH server configured (if deploying to remote machines) Internet access docker daemon installed and running Hardware requirements To create Kubernetes cluster using Flexkube, you need a machine with at least 2 GB of RAM for controller node and at least 1 GB of RAM for worker nodes.\nConnectivity Containers registry Machines which will be part of the cluster must have access to container registry from where the cluster component images will be pulled. By default public registries are used, so machines must have internet access.\nIf you re-configure the cluster to use images from private repository, internet access should not be required.\nSSH For deploying on remote machines, Flexkube use SSH tunnels to talk to container runtime on remote machine, so make sure SSH daemon is configured on them and is accessible from the host you will be deploying.\nIf you deploy only on local machine, SSH is not required.\nNetwork It is recommended, that all machines which are part of the cluster are connected using private network, to avoid exposing your cluster components to the internet.\nContainer runtime Flexkube runs all of Kubernetes controlplane components as containers, so container runtime must be installed and configured on the machines before deploying.\nAt the moment only Docker runtime is supported. In the future, support for more container runtime might be added.\n"});index.add({'id':16,'href':'/documentation/guides/','title':"Guides",'content':"Guides "});index.add({'id':17,'href':'/documentation/guides/etcd/','title':"Etcd",'content':"etcd guides In this section you can find all guides related to managing etcd clusters using Flexkube.\n"});index.add({'id':18,'href':'/documentation/guides/etcd/creating-multi-member-cluster-over-ssh-using-terraform/','title':"Creating Multi Member Cluster Over Ssh Using Terraform",'content':"Creating multi-member cluster over SSH using Terraform This guide describes how to create multi member etcd cluster using Terraform and Flexkube provider. The process is very simple and requires just a few steps. 
If you have at least 3 members, your cluster will be able to tolerate loss on one member, so it will be highly available.\nRequirements For this guide, it is required to have at least 2 Linux machines, with Docker daemon installed and running.\nIt is recommended that machines has at least 1 GB of RAM and are fresh machines, as in tutorial the tools will write to directories like /var/lib/etcd or /etc/kubernetes without notice.\nThe Docker version should be 18.06+. You can follow Docker documentation to see how to install Docker on your machine.\nNetwork interfaces setup is not important, however having a private IP address is recommended from security perspective.\nThe machines must be able to communicate with each other.\n I don\u0026#39;t have such machines. ↕ If you don\u0026rsquo;t have such machines available, you can create it locally, using VirtualBox and Vagrant. Make sure you have both tools installed by following respective guides:\n Installing VirtualBox Installing Vagrant Once done, create file named Vagrantfile with following content:\nVagrant.configure(\u0026#34;2\u0026#34;) do |config| config.vm.box = \u0026#34;flatcar-stable\u0026#34; config.vm.box_url = \u0026#34;https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.box\u0026#34; config.ssh.username = \u0026#39;core\u0026#39; config.vm.provider :virtualbox do |v| v.memory = 1024 end config.vm.define \u0026#34;member1\u0026#34; do |config| config.vm.hostname = \u0026#34;member1\u0026#34; config.vm.network \u0026#34;private_network\u0026#34;, ip: \u0026#34;192.168.52.10\u0026#34; end config.vm.define \u0026#34;member2\u0026#34; do |config| config.vm.hostname = \u0026#34;member2\u0026#34; config.vm.network \u0026#34;private_network\u0026#34;, ip: \u0026#34;192.168.52.11\u0026#34; end end Then, run the following commands to create the machines:\nvagrant up Preparation Before we start creating a cluster, we need to gather some information and download required binaries.\nIP addresses for etcd members and SSH IP addresses of members must be known ahead of cluster creation time.\nYou can find available IP addresses on your machines using e.g. ifconfig tool.\nYou can try getting the IP address automatically using the following command:\nip addr show dev $(ip r | grep default | tr \u0026#39; \u0026#39; \\\\n | grep -A1 dev | tail -n1) | grep \u0026#39;inet \u0026#39; | awk \u0026#39;{print $2}\u0026#39; | cut -d/ -f1 Save the IP addresses of your machines, as they will be needed later on for configuration.\nIf you plan to use different IP addresses for connecting over SSH to your machines and different for members to communicate, note both of them. Downloading terraform binary For this guide, you must have terraform binary available. 
You can download it using the following command:\nexport VERSION=0.12.26 wget https://releases.hashicorp.com/terraform/${VERSION}/terraform_${VERSION}_linux_amd64.zip \u0026amp;\u0026amp; \\ unzip terraform_${VERSION}_linux_amd64.zip \u0026amp;\u0026amp; \\ rm terraform_0.12.26_linux_amd64.zip Downloading terraform-provider-flexkube binary Execute the following command to download flexkube CLI binary into working directory on the machine where you want to create the etcd cluster.\nexport VERSION=v0.3.0 wget -O- https://github.com/flexkube/libflexkube/releases/download/${VERSION}/terraform-provider-flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - terraform-provider-flexkube_${VERSION}_x4 Downloading etcdctl binary (optional) To test cluster functionality, you can download etcdctl binary, however, this is optional. Also, if you use Flatcar Container Linux, the binary should be available on the system already.\nYou can download it using the following command:\nexport ETCD_VER=v3.4.9 wget https://storage.googleapis.com/etcd/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -O- | tar zxvf - etcd-${ETCD_VER}-linux-amd64/etcdctl \u0026amp;\u0026amp; mv etcd-${ETCD_VER}-linux-amd64/etcdctl ./ \u0026amp;\u0026amp; rm dir etcd-${ETCD_VER}-linux-amd64 Make downloaded binaries available in $PATH For compatibility with rest of the tutorial, you should make sure that downloaded binaries are in one of the directories in the $PATH environment variable.\nYou can also add working directory to the $PATH using the following command:\nexport PATH=\u0026#34;$(pwd):${PATH}\u0026#34; Creating the cluster Now that you have all required binaries and information, we can start creating the cluster.\nTerraform configuration First, create main.tf file with the following content:\nprovider \u0026#34;flexkube\u0026#34; { version = \u0026#34;0.3.0\u0026#34; } provider \u0026#34;local\u0026#34; { version = \u0026#34;1.4.0\u0026#34; } variable \u0026#34;members\u0026#34; { type = map(object({ peer_address = string ssh_address = string })) } variable \u0026#34;ssh_user\u0026#34; { default = \u0026#34;\u0026#34; } variable \u0026#34;ssh_password\u0026#34; { default = \u0026#34;\u0026#34; } variable \u0026#34;ssh_private_key\u0026#34; { default = \u0026#34;\u0026#34; } resource \u0026#34;flexkube_pki\u0026#34; \u0026#34;pki\u0026#34; { etcd { peers = { for name, member in var.members : name =\u0026gt; member.peer_address } servers = { for name, member in var.members : name =\u0026gt; member.peer_address } client_cns = [\u0026#34;root\u0026#34;] } } resource \u0026#34;flexkube_etcd_cluster\u0026#34; \u0026#34;etcd\u0026#34; { pki_yaml = flexkube_pki.pki.state_yaml dynamic \u0026#34;member\u0026#34; { for_each = var.members content { name = member.key peer_address = member.value.peer_address server_address = member.value.peer_address host { ssh { user = var.ssh_user password = var.ssh_password private_key = var.ssh_private_key address = member.value.ssh_address } } } } } locals { ca_cert = \u0026#34;./ca.pem\u0026#34; cert = \u0026#34;./client.pem\u0026#34; key = \u0026#34;./client.key\u0026#34; } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_ca_certificate\u0026#34; { content = flexkube_pki.pki.etcd[0].ca[0].x509_certificate filename = local.ca_cert } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_root_user_certificate\u0026#34; { content = flexkube_pki.pki.etcd[0].client_certificates[index(flexkube_pki.pki.etcd[0].client_cns, \u0026#34;root\u0026#34;)].x509_certificate filename = local.cert } resource 
\u0026#34;local_file\u0026#34; \u0026#34;etcd_root_user_private_key\u0026#34; { sensitive_content = flexkube_pki.pki.etcd[0].client_certificates[index(flexkube_pki.pki.etcd[0].client_cns, \u0026#34;root\u0026#34;)].private_key filename = local.key } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_environment\u0026#34; { filename = \u0026#34;./etcd.env\u0026#34; content = \u0026lt;\u0026lt;EOF#!/bin/bash export ETCDCTL_API=3 export ETCDCTL_CACERT=${abspath(local.ca_cert)} export ETCDCTL_CERT=${abspath(local.cert)} export ETCDCTL_KEY=${abspath(local.key)} export ETCDCTL_ENDPOINTS=\u0026#34;${join(\u0026#34;,\u0026#34;, formatlist(\u0026#34;https://%s:2379\u0026#34;, [for name, member in var.members : member.peer_address]))}\u0026#34; EOF depends_on = [ flexkube_etcd_cluster.etcd, ] } Terraform values Next, create file named values.auto.tfvars, which will store the values required by the Terraform configuration. The file should look like following:\nmembers = { \u0026#34;member1\u0026#34; = { peer_address = \u0026#34;192.168.52.10\u0026#34; ssh_address = \u0026#34;192.168.52.10\u0026#34; }, \u0026#34;member2\u0026#34; = { peer_address = \u0026#34;192.168.52.11\u0026#34; ssh_address = \u0026#34;192.168.52.11\u0026#34; }, } ssh_user = \u0026#34;core\u0026#34; ssh_password = \u0026#34;\u0026#34; ssh_port = 22 ssh_private_key = \u0026lt;\u0026lt;EOF EOF First, it has defined map of members, where they key is the member name and then each member has peer address and SSH address defined. peer_address will be used for etcd and ssh_address will be used to SSH into the machines.\nNext, make sure that SSH settings are correct. If the SSH key, which is authorized to log in into the machines, is loaded in your ssh-agent, you don\u0026rsquo;t need to specify any credentials. Flexkube will automatically pick it up and use it. If not, you can specify content of private key in ssh_private_key field or use password authentication using ssh_password field.\nUsing bastion host is currently not supported, though it will be in the future.\nRunning Terraform Now, to create the cluster run following commands:\nterraform init \u0026amp;\u0026amp; terraform apply If everything went successfully, you should see now running etcd container, when you execute docker ps on the machines.\nVerifying cluster functionality Now that the cluster is running, we can verify that it is functional.\nInspect created files After creating the cluster, you can find following files in the working directory, created by Terraform:\n ca.pem containing etcd CA X.509 certificate in PEM format. client.pem containing etcd client X.509 certificate in PEM format, with root Common Name. client.key RSA private key in PEM format for certificate in client.pemfile. etcd.env containing environment variables needed for etcdctl. Certificates and private key files are required to access the cluster. 
The etcd.env file is just a helper file for this tutorial.\nThe files can also be safely removed, as all the certificates are stored in Terraform state anyway.\nUsing etcdctl etcdctl can be used to verify that the cluster is functional and to perform some basic operations as well as administrative tasks.\nTo be able to use it, it is recommended to set environment variables, pointing to the certificates and cluster members, so they don\u0026rsquo;t have to be repeated for each command.\nWith this guide, you get etcd.env helper file created, from which you can load the environment variables, using following command:\nsource etcd.env Now etcdctl is ready to use.\nTo check if cluster is healthy, execute the following command:\netcdctl endpoint health What\u0026rsquo;s next With cluster running, you can now start using it, e.g. to deploy Kubernetes cluster. To do that using Flexkube and Terraform, you can follow Creating multi node Kubernetes cluster using Terraform.\nTo clean up created resources, see the section below.\nCleaning up First step of removing the cluster is running Terraform, to remove all containers. To perform that, run this command:\nterraform destroy Once finished, you can remove the directories created by the cluster, using the following command on the machines:\nsudo rm -rf /var/lib/etcd/ /etc/kubernetes/ "});index.add({'id':19,'href':'/documentation/guides/etcd/creating-single-member-cluster-on-local-machine-using-flexkube-cli/','title':"Creating Single Member Cluster on Local Machine Using Flexkube Cli",'content':"Creating single-node etcd cluster on local machine using \u0026ldquo;flexkube\u0026rdquo; CLI This guide describes how to create single member etcd cluster using flexkube CLI. It will explain cluster creation process step by step to explain the configuration and provide some insights.\nFor fully automated creation, see Creating single-member etcd cluster on local machine using Terraform.\nRequirements For this guide, it is required to have one Linux machine, with Docker daemon installed and running.\nIt is recommended that machine has at least 1 GB of RAM and is a fresh machine, as in tutorial the tools will write to directories like /var/lib/etcd or /etc/kubernetes without notice.\nThe Docker version should be 18.06+. You can follow Docker documentation to see how to install Docker on your machine.\nNetwork interfaces setup is not important, however having a private IP address is recommended from security perspective.\n I don\u0026#39;t have such machine. ↕ If you don\u0026rsquo;t have such machine available, you can create it locally, using VirtualBox and Vagrant. 
Make sure you have both tools installed by following respective guides:\n Installing VirtualBox Installing Vagrant Once done, create file named Vagrantfile with following content:\nVagrant.configure(\u0026#34;2\u0026#34;) do |config| config.vm.box = \u0026#34;flatcar-stable\u0026#34; config.vm.box_url = \u0026#34;https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.box\u0026#34; config.ssh.username = \u0026#39;core\u0026#39; config.vm.provider :virtualbox do |v| v.memory = 1024 end end Then, run the following commands to create and connect to the machine:\nvagrant up \u0026amp;\u0026amp; vagrant ssh Preparation Before we start creating a cluster, we need to gather some information and download required binaries.\nLog in into the machine where you want to deploy etcd before proceeding.\nIP address for etcd member IP addresses of members must be known ahead of cluster creation time.\nYou can find available IP addresses on your machine using e.g. ifconfig tool.\nYou can try getting the IP address automatically using the following command:\nexport IP=$(ip addr show dev $(ip r | grep default | tr \u0026#39; \u0026#39; \\\\n | grep -A1 dev | tail -n1) | grep \u0026#39;inet \u0026#39; | awk \u0026#39;{print $2}\u0026#39; | cut -d/ -f1); echo $IP On VirtualBox, we can use 10.0.2.15 IP.\nSave the IP address for future use using the following command:\nexport IP=10.0.2.15 Downloading flexkube binary Once logged in, execute the following command to download flexkube CLI binary into working directory. This is the binary, which will be used to create a cluster components.\nexport VERSION=v0.3.0 wget -O- https://github.com/flexkube/libflexkube/releases/download/${VERSION}/flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - Downloading etcdctl binary (optional) To test cluster functionality, you can download etcdctl binary, however, this is optional. Also, if you use Flatcar Container Linux, the binary should be available on the system already.\nYou can download it using the following command:\nexport ETCD_VER=v3.4.9 wget https://storage.googleapis.com/etcd/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -O- | tar zxvf - etcd-${ETCD_VER}-linux-amd64/etcdctl \u0026amp;\u0026amp; mv etcd-${ETCD_VER}-linux-amd64/etcdctl ./ \u0026amp;\u0026amp; rm dir etcd-${ETCD_VER}-linux-amd64 Make downloaded binaries available in $PATH For compatibility with rest of the tutorial, you should make sure that downloaded binaries are in one of the directories in the $PATH environment variable.\nYou can also add working directory to the $PATH using the following command:\nexport PATH=\u0026#34;$(pwd):${PATH}\u0026#34; Checking Docker availability To avoid runtime issues while running flexkube, run the following command to ensure, that Docker is running and is accessible on your machine:\ndocker ps Creating the cluster Now that you have all required binaries and information, we can start creating the cluster.\nCreating certificates First step to create a cluster is to generate all certificates required by etcd. For that, we will use Flexkube PKI resource.\nBefore we create the certificates, we need to provide some configuration to tell PKI resource to create etcd certificates, as by default it only creates Root CA certificate.\nFor this guide, you can create configuration using the following command:\ncat \u0026lt;\u0026lt;EOF | sed \u0026#39;/^$/d\u0026#39; \u0026gt; config.yaml pki: etcd: peers: member1: ${IP} EOF See PKI configuration reference to see all available configuration options. 
In the following example, we use member1 as an etcd member name. There is no strict convention about the names. The etcd documentation suggests using the hostname or machine-id, which might be a good choice if you plan to run only one member on a single machine. Please note that changing the member name here must also be reflected in the next steps of the tutorial.\nOnce created, run the following command to generate the certificates:\nflexkube pki If everything succeeded, you should find many certificates in the newly created state.yaml file.\nYou can inspect the state.yaml file using the following command:\nless state.yaml In there, you should find the etcd CA certificate and private key, peer and server certificates and private keys for all members we defined (in this tutorial only member1) and the root CA certificate with its private key.\nThe certificate properties are generated in accordance with Kubernetes PKI certificates and requirements.\n Creating etcd cluster With the certificates ready, we can now create the etcd cluster using the etcd resource.\nTo create the etcd cluster, we need to configure its members in the config.yaml file. This can be done using the following command:\ncat \u0026lt;\u0026lt;EOF \u0026gt;\u0026gt; config.yaml etcd: members: member1: peerAddress: ${IP} EOF See etcd configuration reference to see all available configuration options. Now, you can run the following command to create the etcd cluster:\nflexkube etcd When you execute this command, it will print the list of containers which will be created and ask you to confirm it.\nYou can also run it with the --yes flag, to skip the confirmation.\nAfter confirmation, the flexkube binary will by default talk to the Docker runtime over a UNIX socket on the local host and create the desired containers.\nOn consecutive runs, flexkube will first check the state of the created containers and then, if there is any update pending (e.g. container image update), it will again show you the diff which will be applied and ask you for confirmation.\nYou can also run it with --noop to only see if there are some updates pending.\n Once finished, if you run docker ps, you should see the etcd container running.\nInspecting state.yaml file (optional) With the etcd cluster created, state about the running containers will be stored in the state.yaml file. Storing state is needed to calculate configuration updates in future runs and also to allow cleaning up the created containers.\nYou can have a look at the state file using the following command:\nless state.yaml In there, you can find a list of all containers which have been created, their configuration files, flags and on which host and using which container runtime they have been created. This is useful if you want to inspect the configuration of created containers.\nVerifying cluster functionality To verify that the cluster is healthy, we will use the etcd member certificates themselves and the previously downloaded etcdctl binary.\nFirst, we need to prepare the environment variables used by etcdctl to define how to authenticate to the cluster. This can be done using the following commands:\nexport ETCDCTL_API=3 export ETCDCTL_CACERT=/etc/kubernetes/etcd/ca.crt export ETCDCTL_CERT=/etc/kubernetes/etcd/peer.crt export ETCDCTL_KEY=/etc/kubernetes/etcd/peer.key export ETCDCTL_ENDPOINTS=\u0026#34;https://10.0.2.15:2379\u0026#34; Now, we check if all endpoints are healthy, using this command:\nsudo -E etcdctl endpoint health We use sudo, as the created certificate files are only readable by the root user. 
If you are using root user already or you don\u0026rsquo;t want to use sudo, you can extract client certificates from state.yaml file. What\u0026rsquo;s next With cluster running, you can now start using it, e.g. to deploy Kubernetes cluster. To do that using Flexkube and Terraform, you can follow Creating single node Kubernetes cluster on local machine using Terraform.\nTo clean up created resources, see the section below.\nCleaning up To clean up the host, first, rename or remove config.yaml file, so CLI will be able to clean up the resources. For example, execute:\nmv config.yaml config.yaml.old Now you can remove all containers managed by flexkube using following commands:\nflexkube etcd Finally, following directories can be removed as well:\nsudo rm -rf /etc/kubernetes/ /var/lib/etcd/ "});index.add({'id':20,'href':'/documentation/guides/etcd/creating-single-member-cluster-on-local-machine-using-terraform/','title':"Creating Single Member Cluster on Local Machine Using Terraform",'content':"Creating single-member cluster on local machine using Terraform This guide describes how to create single member etcd cluster using Terraform and Flexkube provider. The process is very simple and requires just a few steps.\nFor more detailed guide, see Creating single member etcd cluster on local machine using flexkube CLI.\nRequirements For this guide, it is required to have one Linux machine, with Docker daemon installed and running.\nIt is recommended that machine has at least 1 GB of RAM and is a fresh machine, as in tutorial the tools will write to directories like /var/lib/etcd or /etc/kubernetes without notice.\nThe Docker version should be 18.06+. You can follow Docker documentation to see how to install Docker on your machine.\nNetwork interfaces setup is not important, however having a private IP address is recommended from security perspective.\n I don\u0026#39;t have such machine. ↕ If you don\u0026rsquo;t have such machine available, you can create it locally, using VirtualBox and Vagrant. Make sure you have both tools installed by following respective guides:\n Installing VirtualBox Installing Vagrant Once done, create file named Vagrantfile with following content:\nVagrant.configure(\u0026#34;2\u0026#34;) do |config| config.vm.box = \u0026#34;flatcar-stable\u0026#34; config.vm.box_url = \u0026#34;https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.box\u0026#34; config.ssh.username = \u0026#39;core\u0026#39; config.vm.provider :virtualbox do |v| v.memory = 1024 end end Then, run the following commands to create and connect to the machine:\nvagrant up \u0026amp;\u0026amp; vagrant ssh Preparation Before we start creating a cluster, we need to gather some information and download required binaries.\nLog in into the machine where you want to deploy etcd before proceeding.\nIP address for etcd member IP addresses of members must be known ahead of cluster creation time.\nYou can find available IP addresses on your machine using e.g. ifconfig tool.\nYou can try getting the IP address automatically using the following command:\nexport TF_VAR_ip=$(ip addr show dev $(ip r | grep default | tr \u0026#39; \u0026#39; \\\\n | grep -A1 dev | tail -n1) | grep \u0026#39;inet \u0026#39; | awk \u0026#39;{print $2}\u0026#39; | cut -d/ -f1); echo $TF_VAR_ip On VirtualBox, we can use 10.0.2.15 IP.\nSave the IP address for future use using the following command:\nexport TF_VAR_ip=10.0.2.15 Downloading terraform binary For this guide, you must have terraform binary available. 
You can download it using the following command:\nexport VERSION=0.12.26 wget https://releases.hashicorp.com/terraform/${VERSION}/terraform_${VERSION}_linux_amd64.zip \u0026amp;\u0026amp; \\ unzip terraform_${VERSION}_linux_amd64.zip \u0026amp;\u0026amp; \\ rm terraform_0.12.26_linux_amd64.zip Downloading terraform-provider-flexkube binary Execute the following command to download flexkube CLI binary into working directory on the machine where you want to create the etcd cluster.\nexport VERSION=v0.3.0 wget -O- https://github.com/flexkube/libflexkube/releases/download/${VERSION}/terraform-provider-flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - terraform-provider-flexkube_${VERSION}_x4 Downloading etcdctl binary (optional) To test cluster functionality, you can download etcdctl binary, however, this is optional. Also, if you use Flatcar Container Linux, the binary should be available on the system already.\nYou can download it using the following command:\nexport ETCD_VER=v3.4.9 wget https://storage.googleapis.com/etcd/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -O- | tar zxvf - etcd-${ETCD_VER}-linux-amd64/etcdctl \u0026amp;\u0026amp; mv etcd-${ETCD_VER}-linux-amd64/etcdctl ./ \u0026amp;\u0026amp; rm dir etcd-${ETCD_VER}-linux-amd64 Make downloaded binaries available in $PATH For compatibility with rest of the tutorial, you should make sure that downloaded binaries are in one of the directories in the $PATH environment variable.\nYou can also add working directory to the $PATH using the following command:\nexport PATH=\u0026#34;$(pwd):${PATH}\u0026#34; Creating the cluster Now that you have all required binaries and information, we can start creating the cluster.\nCreate main.tf file with the following content:\nprovider \u0026#34;flexkube\u0026#34; { version = \u0026#34;0.3.0\u0026#34; } provider \u0026#34;local\u0026#34; { version = \u0026#34;1.4.0\u0026#34; } variable \u0026#34;ip\u0026#34; {} variable \u0026#34;name\u0026#34; { default = \u0026#34;member01\u0026#34; } resource \u0026#34;flexkube_pki\u0026#34; \u0026#34;pki\u0026#34; { etcd { peers = { \u0026#34;${var.name}\u0026#34; = var.ip } servers = { \u0026#34;${var.name}\u0026#34; = var.ip } client_cns = [\u0026#34;root\u0026#34;] } } resource \u0026#34;flexkube_etcd_cluster\u0026#34; \u0026#34;etcd\u0026#34; { pki_yaml = flexkube_pki.pki.state_yaml member { name = var.name peer_address = var.ip server_address = var.ip } } locals { ca_cert = \u0026#34;./ca.pem\u0026#34; cert = \u0026#34;./client.pem\u0026#34; key = \u0026#34;./client.key\u0026#34; } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_ca_certificate\u0026#34; { content = flexkube_pki.pki.etcd[0].ca[0].x509_certificate filename = local.ca_cert } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_root_user_certificate\u0026#34; { content = flexkube_pki.pki.etcd[0].client_certificates[index(flexkube_pki.pki.etcd[0].client_cns, \u0026#34;root\u0026#34;)].x509_certificate filename = local.cert } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_root_user_private_key\u0026#34; { sensitive_content = flexkube_pki.pki.etcd[0].client_certificates[index(flexkube_pki.pki.etcd[0].client_cns, \u0026#34;root\u0026#34;)].private_key filename = local.key } resource \u0026#34;local_file\u0026#34; \u0026#34;etcd_environment\u0026#34; { filename = \u0026#34;./etcd.env\u0026#34; content = \u0026lt;\u0026lt;EOF#!/bin/bash export ETCDCTL_API=3 export ETCDCTL_CACERT=${abspath(local.ca_cert)} export ETCDCTL_CERT=${abspath(local.cert)} export 
ETCDCTL_KEY=${abspath(local.key)} export ETCDCTL_ENDPOINTS=\u0026#34;https://${var.ip}:2379\u0026#34; EOF depends_on = [ flexkube_etcd_cluster.etcd, ] } Now, to create the cluster run following commands:\nterraform init \u0026amp;\u0026amp; terraform apply Terraform should pick up the IP address automatically, if you exported it to TF_VAR_ip environment variable.\nIf everything went successfully, you should see now running etcd container, when you execute docker ps.\nVerifying cluster functionality Now that the cluster is running, we can verify that it is functional.\nInspect created files After creating the cluster, you can find following files in the working directory, created by Terraform:\n ca.pem containing etcd CA X.509 certificate in PEM format. client.pem containing etcd client X.509 certificate in PEM format, with root Common Name. client.key RSA private key in PEM format for certificate in client.pemfile. etcd.env containing environment variables needed for etcdctl. Certificates and private key files are required to access the cluster. The etcd.env file is just a helper file for this tutorial.\nThe files can also be safely removed, as all the certificates are stored in Terraform state anyway.\nUsing etcdctl etcdctl can be used to verify that the cluster is functional and to perform some basic operations as well as administrative tasks.\nTo be able to use it, it is recommended to set environment variables, pointing to the certificates and cluster members, so they don\u0026rsquo;t have to be repeated for each command.\nWith this guide, you get etcd.env helper file created, from which you can load the environment variables, using following command:\nsource etcd.env Now etcdctl is ready to use.\nTo check if cluster is healthy, execute the following command:\netcdctl endpoint health What\u0026rsquo;s next With cluster running, you can now start using it, e.g. to deploy Kubernetes cluster. To do that using Flexkube and Terraform, you can follow Creating single node Kubernetes cluster on local machine using Terraform.\nTo clean up created resources, see the section below.\nCleaning up First step of removing the cluster is running Terraform, to remove all containers. To perform that, run this command:\nterraform destroy Once finished, you can remove the directories created by the cluster, using the following command:\nsudo rm -rf /var/lib/etcd/ /etc/kubernetes/ "});index.add({'id':21,'href':'/documentation/guides/kubernetes/','title':"Kubernetes",'content':"Kubernetes guides "});index.add({'id':22,'href':'/documentation/guides/kubernetes/creating-multi-node-cluster-using-terraform/','title':"Creating Multi Node Cluster Using Terraform",'content':"Creating multi-node cluster using Terraform "});index.add({'id':23,'href':'/documentation/guides/kubernetes/creating-single-node-cluster-on-local-machine-using-flexkube-cli/','title':"Creating Single Node Cluster on Local Machine Using Flexkube Cli",'content':"Creating single-node cluster on local machine using \u0026ldquo;flexkube\u0026rdquo; CLI This guide describes how to create single node Kubernetes cluster using flexkube CLI. 
It will walk through the cluster creation process step by step to explain the configuration and provide some insights.\nFor fully automated creation, see Creating single-node Kubernetes cluster on local machine using Terraform.\nRequirements For this guide, it is required to have one Linux machine, with the Docker daemon installed and running.\nIt is recommended that the machine has at least 2 GB of RAM and is a fresh machine, as in this tutorial the tools will write to directories like /etc/kubernetes or /var/lib/kubelet without notice.\nThe Docker version should be 18.06+.\nNetwork interfaces setup is not important, however having a private IP address is recommended from a security perspective.\n I don\u0026#39;t have such machine. ↕ If you don\u0026rsquo;t have such a machine available, you can create it locally, using VirtualBox and Vagrant. Make sure you have both tools installed by following the respective guides:\n Installing VirtualBox Installing Vagrant Once done, create a file named Vagrantfile with the following content:\nVagrant.configure(\u0026#34;2\u0026#34;) do |config| config.vm.box = \u0026#34;flatcar-stable\u0026#34; config.vm.box_url = \u0026#34;https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.box\u0026#34; config.ssh.username = \u0026#39;core\u0026#39; config.vm.provider :virtualbox do |v| v.memory = 2048 end end Then, run the following commands to create and connect to the machine:\nvagrant up \u0026amp;\u0026amp; vagrant ssh Preparation Before we start creating a cluster, we need to gather some information and download required binaries.\nLog in to the machine where you want to deploy Kubernetes before proceeding.\nIP address for deployment To configure cluster components, you need to provide the IP address, which will be used by the cluster. You can find available IP addresses using e.g. the ifconfig command.\nYou can try getting the IP address automatically using the following command:\nexport IP=$(ip addr show dev $(ip r | grep default | tr \u0026#39; \u0026#39; \\\\n | grep -A1 dev | tail -n1) | grep \u0026#39;inet \u0026#39; | awk \u0026#39;{print $2}\u0026#39; | cut -d/ -f1); echo $IP On VirtualBox, we can use the 10.0.2.15 IP.\nSave the IP address for future use using the following command:\nexport IP=10.0.2.15 Selecting service CIDR and pod CIDR Kubernetes requires 2 network CIDRs to operate: one from which each pod will receive its IP address and one for Service objects with type ClusterIP. While selecting the CIDRs, make sure they don\u0026rsquo;t overlap with each other and with other networks your machine is connected to.\nOnce decided on the CIDRs, we should also save 2 special IP addresses:\n kubernetes Service - This IP address will be used by pods which talk to the Kubernetes API. It must be included in the kube-apiserver server certificate IP addresses list. This must be the first address of the Service CIDR. So if your service CIDR is 11.0.0.0/24, it should be 11.0.0.1. DNS Service - This IP address will be used by the cluster\u0026rsquo;s DNS service. This IP is usually the 10th address of the Service CIDR. So if your service CIDR is 11.0.0.0/24, it should be 11.0.0.10. With all this information gathered, you can run a command like this to save it for later use:\nexport POD_CIDR=10.0.0.0/24 export SERVICE_CIDR=11.0.0.0/24 export KUBERNETES_SERVICE_IP=11.0.0.1 export DNS_SERVICE_IP=11.0.0.10 Downloading flexkube binary Once logged in, execute the following command to download the flexkube CLI binary into the working directory. 
This is the binary, which will be used to create a cluster components.\nexport VERSION=v0.3.0 wget -O- https://github.com/flexkube/libflexkube/releases/download/${VERSION}/flexkube_${VERSION}_linux_amd64.tar.gz | tar zxvf - Downloading kubectl binary To verify that cluster is operational it is recommended to have kubectl binary available. You can install it using the following command:\ncurl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl \u0026amp;\u0026amp; chmod +x kubectl Downloading helm binary Parts of cluster provisioning is done using Helm 3 binary, when deploying the cluster using the flexkube CLI. You can install it using the following command:\nwget -O- https://get.helm.sh/helm-v3.2.1-linux-amd64.tar.gz | tar -zxvf - linux-amd64/helm \u0026amp;\u0026amp; mv linux-amd64/helm ./ \u0026amp;\u0026amp; rmdir linux-amd64 Make downloaded binaries available in $PATH For compatibility with rest of the tutorial, you should make sure that downloaded binaries are in one of the directories in the $PATH environment variable.\nYou can also add working directory to the $PATH using the following command:\nexport PATH=\u0026#34;$(pwd):${PATH}\u0026#34; Creating the cluster Now that you have all required binaries and information, we can start creating the cluster.\nCreating certificates First step to create a cluster is to generate all certificates required by Kubernetes. As this is not a trivial task to create and manage those certificates, Flexkube provides PKI resource, which does exactly that.\nBefore we create the certificates, we need to provide some configuration to tell PKI resource to create for you both etcd and Kubernetes certificates, as by default it only creates Root CA certificate.\nFor this guide, you can create configuration using the following command:\ncat \u0026lt;\u0026lt;EOF | sed \u0026#39;/^$/d\u0026#39; \u0026gt; config.yaml pki: etcd: clientCNs: - kube-apiserver peers: testing: ${IP} kubernetes: kubeAPIServer: serverIPs: - ${IP} - ${KUBERNETES_SERVICE_IP} EOF See PKI configuration reference to see all available configuration options. Once created, run the following command to generate the certificates:\nflexkube pki If everything succeeded, you should find many certificates in newly created state.yaml file.\nCreating etcd cluster Before we start Kubernetes containers, we need etcd cluster. Flexkube provides etcd resource to manage such clusters.\nTo create etcd cluster, we need to configure it\u0026rsquo;s members in config.yaml file. This can be done using the following command:\ncat \u0026lt;\u0026lt;EOF \u0026gt;\u0026gt; config.yaml etcd: members: testing: peerAddress: ${IP} EOF See etcd configuration reference to see all available configuration options. Now, you can run the following command to create etcd cluster:\nflexkube etcd Once finished, you should see etcd container running, if you run docker ps.\nCreating static Kubernetes controlplane With etcd running, you can now create static Kubernetes controlplane. Static, as Flexkube recommends to run Kubernetes controlplane self-hosted, so managed using Kubernetes itself. However, before this can be done, temporary, or static controlplane is needed. 
And this is exactly what Controlplane resource provides.\nYou can configure it by running the following command:\ncat \u0026lt;\u0026lt;EOF \u0026gt;\u0026gt; config.yaml controlplane: apiServerAddress: ${IP} apiServerPort: 6443 kubeAPIServer: serviceCIDR: ${SERVICE_CIDR} etcdServers: - https://${IP}:2379 kubeControllerManager: flexVolumePluginDir: /var/lib/kubelet/volumeplugins EOF See Controlplane configuration reference to see all available configuration options. Now, you can create Kubernetes controlplane using the following command:\nflexkube controlplane Execution can take a while, as Kubernetes docker images must be now pulled.\nOnce finished, you should see 3 new containers running when you run docker ps.\nGetting kubeconfig file Even though the cluster has no objects or deployments yet, you should be able to access it already. For that, you need kubeconfig file. flexkube CLI provides flexkube kubeconfig command, which will read information about the cluster from configuration and state files and print it to you.\nTo generate kubeconfig file, run the following command:\nflexkube kubeconfig | grep -v \u0026#34;Trying to read\u0026#34; \u0026gt; kubeconfig kubeconfig file should be created.\nNow, you need to configure Kubernetes clients to use this file. This can be done using the following command:\nexport KUBECONFIG=$(pwd)/kubeconfig You can now run kubectl version to verify, that the cluster is accessible.\nAdding nodes to the cluster Having a cluster without nodes is not very useful. This section describes how to add nodes to your cluster.\nCreating TLS bootstrapping RBAC rules and bootstrap tokens Flexkube requires TLS bootstrapping process to be used while adding new nodes to the cluster. To enable that, extra RBAC rules must be created before nodes tries to join the cluster.\nThis step is handled by tls-bootstrapping helm chart, which creates RBAC rules and allows to create bootstrap tokens.\nFirst, we need to generate bootstrap token, which will be used in next steps. You can do it by running the following commands:\nexport TOKEN_ID=$(cat /dev/urandom | tr -dc \u0026#39;a-z0-9\u0026#39; | fold -w 6 | head -n 1) export TOKEN_SECRET=$(cat /dev/urandom | tr -dc \u0026#39;a-z0-9\u0026#39; | fold -w 16 | head -n 1) Then, install the chart to create RBAC rules and bootstrap token, by running this command:\nhelm upgrade --install -n kube-system tls-bootstrapping flexkube/tls-bootstrapping --set tokens[0].token-id=$TOKEN_ID --set tokens[0].token-secret=$TOKEN_SECRET Creating kubelet pool With Flexkube, kubelets are managed in pools by Kubelet Pool resource. This allows to group them to share the configuration. Usually clusters have one group called controllers which runs controlplane components and one or more worker pools, which might characterize with e.g. different hardware.\nFor this tutorial, we will just create single pool default.\nYou can configure this pool by running the following command:\ncat \u0026lt;\u0026lt;EOF \u0026gt;\u0026gt; config.yaml kubeletPools: default: bootstrapConfig: token: ${TOKEN_ID}.${TOKEN_SECRET} server: ${IP}:6443 adminConfig: server: ${IP}:6443 privilegedLabels: node-role.kubernetes.io/master: \u0026#34;\u0026#34; volumePluginDir: /var/lib/kubelet/volumeplugins kubelets: - name: testing address: ${IP} EOF See Kubelet pool configuration reference to see all available configuration options. 
Now, to create default pool, run the following command:\nflexkube kubelet-pool default Once finished, you should see that node testing has been added to the cluster by running kubectl get nodes.\nInstalling CNI, CoreDNS and other packages Now that you have cluster running with nodes, you need to install some extra packages to make the cluster fully functional.\nAdding helm repositories Before proceeding, make sure you have stable and flexkube Helm repositories configured, as it is the recommended source for installing the charts mentioned in next sections. You can add required repositories by running the following commands:\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com/ helm repo add flexkube https://flexkube.github.io/charts/ Installing kube-proxy kube-proxy is not required for bare Kubernetes cluster, so it can be fully managed using Kubernetes itself.\nkube-proxy handles load balancing traffic to service CIDR in the cluster.\nTo install it, run the following command:\nhelm upgrade --install -n kube-system kube-proxy flexkube/kube-proxy --set \u0026#34;podCIDR=${POD_CIDR}\u0026#34; --set apiServers=\u0026#34;{${IP}:6443}\u0026#34; Installing Calico chart as CNI plugin While not necessarily required for this guide, as we only run one node, it is recommended to install some CNI plugin on the cluster, as without that, kubelet will stay in NotReady state.\nFlexkube recommends using Calico as a CNI plugin, as it works on variety of platforms and provides both IPAM and NetworkPolicies implementation. Flexkube also provides calico helm chart, so Calico installation can be easily configured and managed.\nTo install it, run the following command:\nhelm upgrade --install -n kube-system calico flexkube/calico --set flexVolumePluginDir=/var/lib/kubelet/volumeplugins --set podCIDR=$POD_CIDR We specify flexVolumePluginDir, as default path is on /usr partition, which is read-only in Flatcar Container Linux. Installing CoreDNS as Cluster DNS To provide DNS resolving for pods and service names it is recommended to run CoreDNS on your cluster. It can be installed from upstream Helm chart.\nTo install it, run the following command:\nhelm upgrade --install -n kube-system coredns stable/coredns --set rbac.pspEnable=true --set service.ClusterIP=$DNS_SERVICE_IP Installing kubelet-rubber-stamp As part of kubelet TLS bootstrapping process, kubelet requests serving certificate from Kubernetes API, to be able to use it for serving logs and metrics securely to kube-apiserver.\nAt the time of writing, kube-controller-manager does not approve those certificates and 3rd party controller needs to be used to automate this process. This is what kubelet-rubber-stamp does.\nIt can be installed by running the following command:\nhelm upgrade --install -n kube-system kubelet-rubber-stamp flexkube/kubelet-rubber-stamp Verifying cluster functionality Now your cluster is ready to use. Go ahead and try deploying some application on it. Please keep following things in mind, while using the cluster:\n Service of type LoadBalancer won\u0026rsquo;t get the IP address, as there is no controller, which could assign it. The cluster has Pod Security Policies enabled by default. Make sure your deployment ships the PSP. There is no storage provider on the cluster, so pods requesting PVCs will be stuck in pending state. Cleaning up To clean up the host, first, uninstall all helm releases, so kubelet removes all the pods. 
This can be done using the following command:\nhelm uninstall -n kube-system calico coredns kube-proxy kubelet-rubber-stamp tls-bootstrapping Then, rename or remove config.yaml file, so CLI will be able to clean up the resources. For example, execute:\nmv config.yaml config.yaml.old Now you can remove all containers managed by flexkube using following commands:\nflexkube kubelet-pool default flexkube controlplane flexkube etcd Finally, following directories can be removed as well:\nsudo rm -rf /etc/kubernetes/ /var/lib/etcd/ /var/lib/kubelet/ /var/lib/calico/ What\u0026rsquo;s next This guide explains, how to create a cluster using flexkube CLI, which explains every step and provides insights, but might be time consuming and error-prone. For fully automated installation, see \u0026ldquo;Creating single-node Kubernetes cluster on local machine using Terraform\u0026rdquo;.\nIf you want to deploy the cluster to remote machine(s), which also supports HA controlplane, see \u0026ldquo;Creating multi-node cluster using Terraform\u0026rdquo;.\n"});index.add({'id':24,'href':'/documentation/guides/kubernetes/creating-single-node-cluster-on-local-machine-using-terraform/','title':"Creating Single Node Cluster on Local Machine Using Terraform",'content':"Creating single-node cluster on local machine using Terraform "});index.add({'id':25,'href':'/documentation/helm-charts/','title':"Helm Charts",'content':"Helm Charts Resources provided by Flexkube only allow to run minimal Kubernetes cluster, without many essential services like kube-proxy, CoreDNS or Network Plugin. However, those processes can be easily managed using Kubernetes itself, which allows to manage them as any other Kubernetes workload.\nIt is also recommended to run Kubernetes control plane components (kube-apiserver, kube-scheduler etc.) as Kubernetes workloads, as this allows easy integration with metrics collection, centralized logging, auto-scaling etc.\nThe recommended way of installing remaining components is trough helm 3.x, which no longer require Tiller for operating. This allows installing Helm charts directly into the Kubernetes temporary control plane.\nUpstream charts Following charts can be used directly from upstream and it is recommended to install them on every cluster:\n coredns - provides Cluster DNS service metrics-server - provides API for Pods and Nodes metrics, which is required by kubectl top command and auto-scaling Those charts can be installed from the stable repository e.g. using the following command:\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com/ \u0026amp;\u0026amp; \\ helm install -n kube-system coredns stable/coredns Flexkube charts For the charts, which are not available in upstream projects, Flexkube maintains it\u0026rsquo;s own charts and provides user a repository, from where the charts can be deployed. Here is the list of charts provided by Flexkube:\n kubernetes - provides kube-proxy, kube-scheduler, kube-controller-manager, extra roles etc. kube-apiserver - provides kube-apiserver, separately from other Kubernetes components to be able to enforce Kubernetes version skew policy calico - provides Calico CNI kubelet-rubber-stamp - provides daemon, which approves Kubelet serving certificates, which is not done by kube-controller-manager as for other Kubelet certificates Those charts can be installed from the flexkube repository e.g. 
using the following command:\nhelm repo add flexkube https://flexkube.github.io/charts/ \u0026amp;\u0026amp; \\ helm install -n kube-system calico flexkube/calico "});index.add({'id':26,'href':'/documentation/helm-charts/maintained/','title':"Maintained",'content':""});index.add({'id':27,'href':'/documentation/helm-charts/maintained/calico/','title':"Calico",'content':""});index.add({'id':28,'href':'/documentation/helm-charts/maintained/kube-apiserver/','title':"Kube Apiserver",'content':""});index.add({'id':29,'href':'/documentation/helm-charts/maintained/kube-proxy/','title':"Kube Proxy",'content':""});index.add({'id':30,'href':'/documentation/helm-charts/maintained/kubelet-rubber-stamp/','title':"Kubelet Rubber Stamp",'content':""});index.add({'id':31,'href':'/documentation/helm-charts/maintained/kubernetes/','title':"Kubernetes",'content':""});index.add({'id':32,'href':'/documentation/helm-charts/maintained/tls-bootstrapping/','title':"Tls Bootstrapping",'content':""});index.add({'id':33,'href':'/documentation/helm-charts/upstream/','title':"Upstream",'content':""});index.add({'id':34,'href':'/documentation/helm-charts/upstream/stable-coredns/','title':"Stable Coredns",'content':""});index.add({'id':35,'href':'/documentation/helm-charts/upstream/stable-metrics-server/','title':"Stable Metrics Server",'content':""});index.add({'id':36,'href':'/documentation/overview/','title':"Overview",'content':"Overview "});index.add({'id':37,'href':'/documentation/project-status/','title':"Project Status",'content':""});index.add({'id':38,'href':'/documentation/reference/','title':"Reference",'content':"Reference This section includes the reference documentation for the Flexkube Go API, Terraform provider, CLI and configuration options:\n CLI - For people interested in using flexkube CLI Terraform - For people interested in using Flexkube using Terraform. Go - For people interested in using libflexkube in other Go projects. 
"});index.add({'id':39,'href':'/documentation/reference/cli/','title':"Cli",'content':"Flexkube CLI (flexkube) This section includes the reference documentation for the Flexkube CLI (flexkube), it\u0026rsquo;s subcommands and flags and configuration syntax and options.\n"});index.add({'id':40,'href':'/documentation/reference/cli/commands/','title':"Commands",'content':""});index.add({'id':41,'href':'/documentation/reference/cli/commands/apiloadbalancer-pool/','title':"Apiloadbalancer Pool",'content':""});index.add({'id':42,'href':'/documentation/reference/cli/commands/containers/','title':"Containers",'content':""});index.add({'id':43,'href':'/documentation/reference/cli/commands/controlplane/','title':"Controlplane",'content':""});index.add({'id':44,'href':'/documentation/reference/cli/commands/etcd/','title':"Etcd",'content':""});index.add({'id':45,'href':'/documentation/reference/cli/commands/kubeconfig/','title':"Kubeconfig",'content':""});index.add({'id':46,'href':'/documentation/reference/cli/commands/kubelet-pool/','title':"Kubelet Pool",'content':""});index.add({'id':47,'href':'/documentation/reference/cli/commands/pki/','title':"Pki",'content':""});index.add({'id':48,'href':'/documentation/reference/cli/configuration/','title':"Configuration",'content':""});index.add({'id':49,'href':'/documentation/reference/cli/configuration/apiloadbalancer-pool/','title':"Apiloadbalancer Pool",'content':""});index.add({'id':50,'href':'/documentation/reference/cli/configuration/containers/','title':"Containers",'content':""});index.add({'id':51,'href':'/documentation/reference/cli/configuration/controlplane/','title':"Controlplane",'content':""});index.add({'id':52,'href':'/documentation/reference/cli/configuration/etcd/','title':"Etcd",'content':""});index.add({'id':53,'href':'/documentation/reference/cli/configuration/kubelet-pool/','title':"Kubelet Pool",'content':""});index.add({'id':54,'href':'/documentation/reference/cli/configuration/pki/','title':"Pki",'content':""});index.add({'id':55,'href':'/documentation/reference/go/','title':"Go",'content':"Go For Go language reference documentation, see https://godoc.org/github.com/flexkube/libflexkube.\n"});index.add({'id':56,'href':'/documentation/reference/helm-charts/','title':"Helm Charts",'content':""});index.add({'id':57,'href':'/documentation/reference/helm-charts/calico/','title':"Calico",'content':""});index.add({'id':58,'href':'/documentation/reference/helm-charts/kube-apiserver/','title':"Kube Apiserver",'content':""});index.add({'id':59,'href':'/documentation/reference/helm-charts/kubelet-rubber-stamp/','title':"Kubelet Rubber Stamp",'content':""});index.add({'id':60,'href':'/documentation/reference/helm-charts/kubernetes/','title':"Kubernetes",'content':""});index.add({'id':61,'href':'/documentation/reference/helm-charts/tls-bootstrapping/','title':"Tls Bootstrapping",'content':""});index.add({'id':62,'href':'/documentation/reference/terraform/','title':"Terraform",'content':"Terraform This page contains reference documentation for all Terraform resources provided by Flexkube.\n"});index.add({'id':63,'href':'/documentation/reference/terraform/flexkube-provider/','title':"Flexkube Provider",'content':"Flexkube Provider The Flexkube (flexkube) provider is used to interact with the resources supported by libflexkube. 
The provider itself does not require any configuration.\nUse the navigation to the left to read about the available resources.\nExample Usage provider \u0026#34;flexkube\u0026#34; {} "});index.add({'id':64,'href':'/documentation/reference/terraform/resources/','title':"Resources",'content':"Resources Here is the list of all Terraform resources provided by the Flexkube provider.\n"});index.add({'id':65,'href':'/documentation/reference/terraform/resources/flexkube_api_loadbalancer_pool/','title':"Flexkube Api Loadbalancer Pool",'content':""});index.add({'id':66,'href':'/documentation/reference/terraform/resources/flexkube_controlplane/','title':"Flexkube Controlplane",'content':"flexkube_controlplane "});index.add({'id':67,'href':'/documentation/reference/terraform/resources/flexkube_etcd_cluster/','title':"Flexkube Etcd Cluster",'content':"flexkube_etcd_cluster "});index.add({'id':68,'href':'/documentation/reference/terraform/resources/flexkube_helm_release/','title':"Flexkube Helm Release",'content':"flexkube_helm_release "});index.add({'id':69,'href':'/documentation/reference/terraform/resources/flexkube_kubelet_pool/','title':"Flexkube Kubelet Pool",'content':"flexkube_kubelet_pool "});index.add({'id':70,'href':'/documentation/reference/terraform/resources/flexkube_pki/','title':"Flexkube Pki",'content':"flexkube_pki "});index.add({'id':71,'href':'/documentation/resources/','title':"Resources",'content':"Resources This section describes all resources which can be managed using Flexkube.\n"});index.add({'id':72,'href':'/documentation/resources/api-loadbalancer/','title':"Api Loadbalancer",'content':""});index.add({'id':73,'href':'/documentation/resources/containers/','title':"Containers",'content':"Containers resource This document should describe when the containers resource is useful.\n"});index.add({'id':74,'href':'/documentation/resources/controlplane/','title':"Controlplane",'content':""});index.add({'id':75,'href':'/documentation/resources/etcd/','title':"Etcd",'content':"etcd "});index.add({'id':76,'href':'/documentation/resources/kubelet-pool/','title':"Kubelet Pool",'content':""});index.add({'id':77,'href':'/documentation/resources/pki/','title':"Pki",'content':"PKI The PKI (Public Key Infrastructure) resource is responsible for generating all X.509 certificates and RSA key pairs which are required by a Kubernetes cluster. Kubernetes requires several certificates to be generated, with specific CNs, different CAs etc., which is difficult to manage, so Flexkube provides a configurable and convenient interface to manage them.\nAll certificates are generated following the Kubernetes PKI certificates and requirements best practices.\nThe current implementation of PKI is experimental and only supports generating the certificates. Renewing the certificates or changing certificate properties is currently not implemented. 
Example configuration: CLI To generate the certificates using the flexkube CLI, create the following config.yaml file:\npki: certificate: organization: \u0026#34;example\u0026#34; etcd: peers: controller01: \u0026#34;192.168.1.10\u0026#34; clientCNs: - \u0026#34;root\u0026#34; - \u0026#34;kube-apiserver\u0026#34; - \u0026#34;prometheus\u0026#34; kubernetes: kubeAPIServer: externalNames: \u0026#34;kube-apiserver.example.com\u0026#34; serverIPs: - \u0026#34;192.168.1.10\u0026#34; Then, run the following command:\nflexkube pki If the configuration is correct, the PKI will be created in the state.yaml file.\nGo To generate Kubernetes PKI using Go, create for example a main.go file with the following content:\npackage main import ( \u0026#34;fmt\u0026#34; \u0026#34;github.com/flexkube/libflexkube/pkg/pki\u0026#34; ) func main() { p := \u0026amp;pki.PKI{ Certificate: pki.Certificate{ Organization: \u0026#34;example\u0026#34;, }, Etcd: \u0026amp;pki.Etcd{ Peers: map[string]string{ \u0026#34;controller01\u0026#34;: \u0026#34;192.168.1.10\u0026#34;, }, ClientCNs: []string{ \u0026#34;root\u0026#34;, \u0026#34;kube-apiserver\u0026#34;, \u0026#34;prometheus\u0026#34;, }, }, Kubernetes: \u0026amp;pki.Kubernetes{ KubeAPIServer: \u0026amp;pki.KubeAPIServer{ ExternalNames: []string{\u0026#34;kube-apiserver.example.com\u0026#34;}, ServerIPs: []string{\u0026#34;192.168.1.10\u0026#34;}, }, }, } p.Generate() fmt.Printf(\u0026#34;%+v\u0026#34;, p) } Then run the following command:\ngo run main.go If everything went successfully, you should get all generated certificates with their properties printed. Please note that it is up to the user to persist the generated certificates when using the Go interface.\nTerraform To create Kubernetes PKI using Terraform, create a main.tf file with the following content:\nresource \u0026#34;flexkube_pki\u0026#34; \u0026#34;pki\u0026#34; { certificate { organization = \u0026#34;example\u0026#34; } etcd { peers = { \u0026#34;controller01\u0026#34; = \u0026#34;192.168.1.10\u0026#34; } client_cns = [ \u0026#34;root\u0026#34;, \u0026#34;kube-apiserver\u0026#34;, \u0026#34;prometheus\u0026#34;, ] } kubernetes { kube_api_server { external_names = [\u0026#34;kube-apiserver.example.com\u0026#34;] server_ips = [\u0026#34;192.168.1.10\u0026#34;] } } } output \u0026#34;kubernetes_ca\u0026#34; { value = flexkube_pki.pki.kubernetes[0].ca[0].x509_certificate } Then, run the following commands:\nterraform init \u0026amp;\u0026amp; terraform apply If everything went successfully, you should see the Kubernetes CA certificate in PEM format printed as Terraform output.\nTo see all available parameters, see the flexkube_pki page in the Reference section.\n "});})();