From 5239a382bca0c41f5835c9881ebc15a4b866d480 Mon Sep 17 00:00:00 2001 From: Kay Yan Date: Fri, 16 Aug 2024 07:12:51 +0000 Subject: [PATCH] Rename master to control-plane in docs and titles Signed-off-by: Kay Yan --- README.md | 2 +- Vagrantfile | 2 +- contrib/azurerm/README.md | 2 +- contrib/azurerm/group_vars/all | 4 +- contrib/dind/README.md | 12 +-- contrib/network-storage/glusterfs/README.md | 8 +- .../terraform/aws/create-infrastructure.tf | 2 +- contrib/terraform/aws/variables.tf | 6 +- contrib/terraform/equinix/README.md | 6 +- .../equinix/sample-inventory/cluster.tfvars | 2 +- contrib/terraform/gcp/generate-inventory.sh | 2 +- contrib/terraform/openstack/README.md | 24 +++--- .../openstack/modules/compute/main.tf | 4 +- .../openstack/sample-inventory/cluster.tfvars | 4 +- contrib/terraform/openstack/variables.tf | 6 +- .../modules/kubernetes-cluster/main.tf | 4 +- contrib/terraform/vsphere/README.md | 6 +- contrib/terraform/vsphere/main.tf | 2 +- .../modules/kubernetes-cluster/variables.tf | 2 +- contrib/terraform/vsphere/variables.tf | 2 +- docs/CNI/multus.md | 2 +- docs/advanced/downloads.md | 2 +- docs/ansible/ansible.md | 2 +- docs/cloud_providers/aws.md | 2 +- docs/cloud_providers/azure.md | 2 +- docs/developers/ci-setup.md | 2 +- docs/developers/vagrant.md | 8 +- docs/operations/cgroups.md | 4 +- docs/operations/etcd.md | 2 +- docs/operations/upgrades.md | 84 +++++++++---------- inventory/sample/group_vars/all/all.yml | 2 +- .../group_vars/k8s_cluster/k8s-cluster.yml | 4 +- playbooks/remove_node.yml | 2 +- playbooks/upgrade_cluster.yml | 4 +- .../metrics_server/tasks/main.yml | 4 +- .../control-plane/defaults/main/main.yml | 4 +- .../control-plane/handlers/main.yml | 50 +++++------ .../control-plane/tasks/encrypt-at-rest.yml | 2 +- .../tasks/kubeadm-fix-apiserver.yml | 6 +- .../control-plane/tasks/kubeadm-setup.yml | 8 +- .../control-plane/tasks/kubeadm-upgrade.yml | 8 +- .../kubelet-fix-client-cert-rotation.yml | 4 +- .../control-plane/tasks/pre-upgrade.yml | 4 +- roles/kubernetes/kubeadm/tasks/main.yml | 4 +- roles/kubernetes/node/defaults/main.yml | 10 +-- .../preinstall/tasks/0040-verify-settings.yml | 2 +- .../kubernetes/tokens/tasks/check-tokens.yml | 2 +- roles/kubernetes/tokens/tasks/gen_tokens.yml | 6 +- .../kubespray-defaults/defaults/main/main.yml | 2 +- .../calico/templates/calico-node.yml.j2 | 2 +- .../kube-router/defaults/main.yml | 2 +- roles/remove-node/post-remove/tasks/main.yml | 2 +- tests/scripts/testcases_run.sh | 2 +- 53 files changed, 174 insertions(+), 174 deletions(-) diff --git a/README.md b/README.md index fdefd7eb0d3..fb32a173586 100644 --- a/README.md +++ b/README.md @@ -215,7 +215,7 @@ Note: Upstart/SysV init based OS types are not supported. Hardware: These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide. 
-- Master +- Control Plane - Memory: 1500 MB - Node - Memory: 1024 MB diff --git a/Vagrantfile b/Vagrantfile index 9733105b04b..3a7f1905dc5 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -64,7 +64,7 @@ $download_run_once ||= "True" $download_force_cache ||= "False" # The first three nodes are etcd servers $etcd_instances ||= [$num_instances, 3].min -# The first two nodes are kube masters +# The first two nodes are kube control planes $kube_master_instances ||= [$num_instances, 2].min # All nodes are kube nodes $kube_node_instances ||= $num_instances diff --git a/contrib/azurerm/README.md b/contrib/azurerm/README.md index 8869ec09114..6901ef1ebfb 100644 --- a/contrib/azurerm/README.md +++ b/contrib/azurerm/README.md @@ -23,7 +23,7 @@ experience. ## Bastion host You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated -templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option +templates will then include an additional bastion VM which can then be used to connect to the control planes and nodes. The option also removes all public IPs from all other VMs. ## Generating and applying diff --git a/contrib/azurerm/group_vars/all b/contrib/azurerm/group_vars/all index 44dc1e384ee..d86aa2bc15c 100644 --- a/contrib/azurerm/group_vars/all +++ b/contrib/azurerm/group_vars/all @@ -3,8 +3,8 @@ # this name must be globally unique - it will be used as a prefix for azure components cluster_name: example -# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion -# node that can be used to access the masters and minions +# Set this to true if you do not want to have public IPs for your control planes and minions. This will provision a bastion +# node that can be used to access the control planes and minions use_bastion: false # Set this to a preferred name that will be used as the first part of the dns name for your bastotion host. For example: k8s-bastion..cloudapp.azure.com. diff --git a/contrib/dind/README.md b/contrib/dind/README.md index 5e72cfc2cf5..6314c77ced9 100644 --- a/contrib/dind/README.md +++ b/contrib/dind/README.md @@ -104,12 +104,12 @@ CONTAINER ID IMAGE COMMAND CREATED STATUS c581ef662ed2 debian:9.5 "sh -c 'apt-get -qy …" 44 minutes ago Up 44 minutes kube-node1 $ docker exec kube-node1 kubectl get node -NAME STATUS ROLES AGE VERSION -kube-node1 Ready master,node 18m v1.12.1 -kube-node2 Ready master,node 17m v1.12.1 -kube-node3 Ready node 17m v1.12.1 -kube-node4 Ready node 17m v1.12.1 -kube-node5 Ready node 17m v1.12.1 +NAME STATUS ROLES AGE VERSION +kube-node1 Ready control-plane,node 18m v1.12.1 +kube-node2 Ready control-plane,node 17m v1.12.1 +kube-node3 Ready node 17m v1.12.1 +kube-node4 Ready node 17m v1.12.1 +kube-node5 Ready node 17m v1.12.1 $ docker exec kube-node1 kubectl get pod --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE diff --git a/contrib/network-storage/glusterfs/README.md b/contrib/network-storage/glusterfs/README.md index bfe0a4d6e5c..9f9e590473a 100644 --- a/contrib/network-storage/glusterfs/README.md +++ b/contrib/network-storage/glusterfs/README.md @@ -4,7 +4,7 @@ You can either deploy using Ansible on its own by supplying your own inventory f ## Using an Ansible inventory -In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. 
Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group. +In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/control-planes, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group. Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu): @@ -21,9 +21,9 @@ ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --u If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=` variable in the inventory file that you just created, for each machine/VM: ```shell -k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core -k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core -k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core +k8s-control-plane-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core +k8s-control-plane-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core +k8s-control-plane-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core ``` ## Using Terraform and Ansible diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf index 0a388447c5c..106b1ca2c82 100644 --- a/contrib/terraform/aws/create-infrastructure.tf +++ b/contrib/terraform/aws/create-infrastructure.tf @@ -68,7 +68,7 @@ resource "aws_instance" "bastion-server" { } /* -* Create K8s Master and worker nodes and etcd instances +* Create K8s control plane and worker nodes and etcd instances * */ diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf index 783d4adffbb..eebfa9a9b57 100644 --- a/contrib/terraform/aws/variables.tf +++ b/contrib/terraform/aws/variables.tf @@ -86,15 +86,15 @@ variable "aws_bastion_num" { } variable "aws_kube_master_num" { - description = "Number of Kubernetes Master Nodes" + description = "Number of Kubernetes Control Plane Nodes" } variable "aws_kube_master_disk_size" { - description = "Disk size for Kubernetes Master Nodes (in GiB)" + description = "Disk size for Kubernetes Control Plane Nodes (in GiB)" } variable "aws_kube_master_size" { - description = "Instance size of Kube Master Nodes" + description = "Instance size of Kube Control Plane Nodes" } variable "aws_etcd_num" { diff --git a/contrib/terraform/equinix/README.md b/contrib/terraform/equinix/README.md index f81e066a068..450538c04ca 100644 --- a/contrib/terraform/equinix/README.md +++ b/contrib/terraform/equinix/README.md @@ -20,15 +20,15 @@ to actually install Kubernetes with Kubespray. 
You can create many different kubernetes topologies by setting the number of different classes of hosts. -- Master nodes with etcd: `number_of_k8s_masters` variable -- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable +- Control plane nodes with etcd: `number_of_k8s_masters` variable +- Control plane nodes without etcd: `number_of_k8s_masters_no_etcd` variable - Standalone etcd hosts: `number_of_etcd` variable - Kubernetes worker nodes: `number_of_k8s_nodes` variable Note that the Ansible script will report an invalid configuration if you wind up with an *even number* of etcd instances since that is not a valid configuration. This restriction includes standalone etcd nodes that are deployed in a cluster along with -master nodes with etcd replicas. As an example, if you have three master nodes with +control plane nodes with etcd replicas. As an example, if you have three control plane nodes with etcd replicas and three standalone etcd nodes, the script will fail since there are now six total etcd replicas. diff --git a/contrib/terraform/equinix/sample-inventory/cluster.tfvars b/contrib/terraform/equinix/sample-inventory/cluster.tfvars index 039f20878c7..4fdc327983a 100644 --- a/contrib/terraform/equinix/sample-inventory/cluster.tfvars +++ b/contrib/terraform/equinix/sample-inventory/cluster.tfvars @@ -20,7 +20,7 @@ number_of_etcd = 0 plan_etcd = "t1.small.x86" -# masters +# control planes number_of_k8s_masters = 1 number_of_k8s_masters_no_etcd = 0 diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh index 585a4f415eb..b34b08ef63c 100755 --- a/contrib/terraform/gcp/generate-inventory.sh +++ b/contrib/terraform/gcp/generate-inventory.sh @@ -33,7 +33,7 @@ mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}")) API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}")) -# Generate master hosts +# Generate control plane hosts i=1 for name in "${MASTER_NAMES[@]}"; do private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}")) diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 5e88849f570..1415f6f6bb4 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -60,15 +60,15 @@ You can create many different kubernetes topologies by setting the number of different classes of hosts. For each class there are options for allocating floating IP addresses or not. -- Master nodes with etcd -- Master nodes without etcd +- Control Plane nodes with etcd +- Control Plane nodes without etcd - Standalone etcd hosts - Kubernetes worker nodes Note that the Ansible script will report an invalid configuration if you wind up with an even number of etcd instances since that is not a valid configuration. This restriction includes standalone etcd nodes that are deployed in a cluster along with -master nodes with etcd replicas. As an example, if you have three master nodes with +control plane nodes with etcd replicas. As an example, if you have three control plane nodes with etcd replicas and three standalone etcd nodes, the script will fail since there are now six total etcd replicas. @@ -254,15 +254,15 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | |`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. 
| |`floatingip_pool` | Name of the pool from which floating IPs will be allocated | -|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. | +|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to control plane nodes instead of creating new random floating IPs. | |`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to bastion node instead of creating new random floating IPs. | |`external_net` | UUID of the external network that will be routed to | |`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` | |`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. | |`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected | |`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs | -|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses| -|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses | +|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both control plane and etcd. These can be provisioned with or without floating IP addresses| +|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just control plane with no etcd. These can be provisioned with or without floating IP addresses | |`number_of_etcd` | Number of pure etcd nodes | |`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. | |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | @@ -281,8 +281,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. 
|`k8s_allowed_egress_ipv6_ips` | List of IPv6 CIDRs allowed for egress traffic, `["::/0"]` by default | |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default | |`worker_allowed_ports_ipv6` | List of ports to open on worker nodes for IPv6 CIDR blocks, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "::/0"}]` by default | -|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | -|`master_allowed_ports_ipv6` | List of ports to open on master nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default | +|`master_allowed_ports` | List of ports to open on control plane nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default | +|`master_allowed_ports_ipv6` | List of ports to open on control plane nodes for IPv6 CIDR blocks, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "::/0"}]`, empty by default | |`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage | |`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage | |`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default | @@ -290,7 +290,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage | |`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage | |`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage | -|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) | +|`master_server_group_policy` | Enable and use openstack nova servergroups for control planes with set policy, default: "" (disabled) | |`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) | |`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) | |`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) | @@ -298,8 +298,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default | |`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. 
`false` by default | |`k8s_nodes` | Map containing worker node definition, see explanation below | -|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | -| `k8s_master_loadbalancer_enabled`| Enable and use an Octavia load balancer for the K8s master nodes | +|`k8s_masters` | Map containing control plane node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | +| `k8s_master_loadbalancer_enabled`| Enable and use an Octavia load balancer for the K8s control plane nodes | | `k8s_master_loadbalancer_listener_port` | Define via which port the K8s Api should be exposed. `6443` by default | | `k8s_master_loadbalancer_server_port` | Define via which port the K8S api is available on the mas. `6443` by default | | `k8s_master_loadbalancer_public_ip` | Specify if an existing floating IP should be used for the load balancer. A new floating IP is assigned by default | @@ -656,7 +656,7 @@ This will take some time as there are many tasks to run. ### Set up kubectl 1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation -2. Add a route to the internal IP of a master node (if needed): +2. Add a route to the internal IP of a control plane node (if needed): ```ShellSession sudo route add [master-internal-ip] gw [router-ip] diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf index 2256ea2b4e6..756b8594129 100644 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ b/contrib/terraform/openstack/modules/compute/main.tf @@ -44,7 +44,7 @@ resource "openstack_networking_secgroup_v2" "k8s_master" { resource "openstack_networking_secgroup_v2" "k8s_master_extra" { count = "%{if var.extra_sec_groups}1%{else}0%{endif}" name = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}" - description = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform" + description = "${var.cluster_name} - Kubernetes Control Plane nodes - rules not managed by terraform" delete_default_rules = true } @@ -269,7 +269,7 @@ resource "openstack_compute_servergroup_v2" "k8s_node_additional" { } locals { -# master groups +# control plane groups master_sec_groups = compact([ openstack_networking_secgroup_v2.k8s_master.id, openstack_networking_secgroup_v2.k8s.id, diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/contrib/terraform/openstack/sample-inventory/cluster.tfvars index 8ab7c6d38e4..e11e83386ce 100644 --- a/contrib/terraform/openstack/sample-inventory/cluster.tfvars +++ b/contrib/terraform/openstack/sample-inventory/cluster.tfvars @@ -7,7 +7,7 @@ cluster_name = "i-didnt-read-the-docs" # SSH key to use for access to nodes public_key_path = "~/.ssh/id_rsa.pub" -# image to use for bastion, masters, standalone etcd instances, and nodes +# image to use for bastion, control planes, standalone etcd instances, and nodes image = "" # user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.) 
@@ -21,7 +21,7 @@ number_of_bastions = 0 # standalone etcds number_of_etcd = 0 -# masters +# control planes number_of_k8s_masters = 1 number_of_k8s_masters_no_etcd = 0 diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf index 90416df50d0..23997389c61 100644 --- a/contrib/terraform/openstack/variables.tf +++ b/contrib/terraform/openstack/variables.tf @@ -3,7 +3,7 @@ variable "cluster_name" { } variable "az_list" { - description = "List of Availability Zones to use for masters in your OpenStack cluster" + description = "List of Availability Zones to use for control planes in your OpenStack cluster" type = list(string) default = ["nova"] } @@ -179,7 +179,7 @@ variable "dns_nameservers" { } variable "k8s_master_fips" { - description = "specific pre-existing floating IPs to use for master nodes" + description = "specific pre-existing floating IPs to use for control plane nodes" type = list(string) default = [] } @@ -380,7 +380,7 @@ variable "image_master" { } variable "image_master_uuid" { - description = "uuid of image to be used on master nodes. If empty defaults to image_uuid" + description = "uuid of image to be used on control plane nodes. If empty defaults to image_uuid" default = "" } diff --git a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf b/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf index 91c8b9ec7b3..e5117fc0e71 100644 --- a/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf +++ b/contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf @@ -173,7 +173,7 @@ resource "upcloud_firewall_rules" "master" { content { action = "accept" - comment = "Allow master API access from this network" + comment = "Allow control plane API access from this network" destination_port_end = "6443" destination_port_start = "6443" direction = "in" @@ -189,7 +189,7 @@ resource "upcloud_firewall_rules" "master" { content { action = "drop" - comment = "Deny master API access from other networks" + comment = "Deny control plane API access from other networks" destination_port_end = "6443" destination_port_start = "6443" direction = "in" diff --git a/contrib/terraform/vsphere/README.md b/contrib/terraform/vsphere/README.md index 7aa50d899ea..b1009398508 100644 --- a/contrib/terraform/vsphere/README.md +++ b/contrib/terraform/vsphere/README.md @@ -116,9 +116,9 @@ ansible-playbook -i inventory.ini ../../cluster.yml -b -v * `dns_secondary`: The IP address of secondary DNS server (default: `8.8.8.8`) * `firmware`: Firmware to use (default: `bios`) * `hardware_version`: The version of the hardware (default: `15`) -* `master_cores`: The number of CPU cores for the master nodes (default: 4) -* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096) -* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20) +* `master_cores`: The number of CPU cores for the control plane nodes (default: 4) +* `master_memory`: The amount of RAM for the control plane nodes in MB (default: 4096) +* `master_disk_size`: The amount of disk space for the control plane nodes in GB (default: 20) * `worker_cores`: The number of CPU cores for the worker nodes (default: 16) * `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192) * `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100) diff --git a/contrib/terraform/vsphere/main.tf b/contrib/terraform/vsphere/main.tf index fb2d8c8327e..9ccc684bb22 100644 --- a/contrib/terraform/vsphere/main.tf +++ 
b/contrib/terraform/vsphere/main.tf @@ -45,7 +45,7 @@ module "kubernetes" { machines = var.machines - ## Master ## + ## Control Plane ## master_cores = var.master_cores master_memory = var.master_memory master_disk_size = var.master_disk_size diff --git a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf b/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf index cb99142321c..662e6a8bde5 100644 --- a/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf +++ b/contrib/terraform/vsphere/modules/kubernetes-cluster/variables.tf @@ -32,7 +32,7 @@ variable "ssh_public_keys" { } variable "hardware_version" {} -## Master ## +## Control Plane ## variable "master_cores" {} variable "master_memory" {} variable "master_disk_size" {} diff --git a/contrib/terraform/vsphere/variables.tf b/contrib/terraform/vsphere/variables.tf index 03f9007e11d..4769360c226 100644 --- a/contrib/terraform/vsphere/variables.tf +++ b/contrib/terraform/vsphere/variables.tf @@ -73,7 +73,7 @@ variable "hardware_version" { default = "15" } -## Master ## +## Control Plane ## variable "master_cores" { default = 4 diff --git a/docs/CNI/multus.md b/docs/CNI/multus.md index 1f724848db3..d8613fd5209 100644 --- a/docs/CNI/multus.md +++ b/docs/CNI/multus.md @@ -8,7 +8,7 @@ See [multus documentation](https://github.com/k8snetworkplumbingwg/multus-cni). ## Multus installation -Since Multus itself does not implement networking, it requires a master plugin, which is specified through the variable `kube_network_plugin`. To enable Multus an additional variable `kube_network_plugin_multus` must be set to `true`. For example, +Since Multus itself does not implement networking, it requires a primary CNI plugin, which is specified through the variable `kube_network_plugin`. To enable Multus an additional variable `kube_network_plugin_multus` must be set to `true`. For example, ```yml kube_network_plugin: calico diff --git a/docs/advanced/downloads.md b/docs/advanced/downloads.md index 9961eab5ae5..95f2fa3e4c3 100644 --- a/docs/advanced/downloads.md +++ b/docs/advanced/downloads.md @@ -9,7 +9,7 @@ Kubespray supports several download/upload modes. The default is: There is also a "pull once, push many" mode as well: * Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`. -* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that the container runtime is installed and running on the Ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to use the container runtime. Note: even if `download_localhost` is false, files will still be copied to the Ansible server (local host) from the delegated download node, and then distributed from the Ansible server to all cluster nodes. +* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that the container runtime is installed and running on the Ansible control node and that the current user is either in the docker group or can do passwordless sudo, to be able to use the container runtime. 
Note: even if `download_localhost` is false, files will still be copied to the Ansible server (local host) from the delegated download node, and then distributed from the Ansible server to all cluster nodes. NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the storage of the container runtime on that node, instead of just the images required for that node. diff --git a/docs/ansible/ansible.md b/docs/ansible/ansible.md index 40d52f9c334..43e005247fb 100644 --- a/docs/ansible/ansible.md +++ b/docs/ansible/ansible.md @@ -199,7 +199,7 @@ The following tags are defined in playbooks: | local-path-provisioner | Configure External provisioner: local-path | | local-volume-provisioner | Configure External provisioner: local-volume | | macvlan | Network plugin macvlan | -| master | Configuring K8s master node role | +| master | Configuring K8s control plane node role | | metallb | Installing and configuring metallb | | metrics_server | Configuring metrics_server | | netchecker | Installing netchecker K8s app | diff --git a/docs/cloud_providers/aws.md b/docs/cloud_providers/aws.md index 45938980c3e..252eb22c50a 100644 --- a/docs/cloud_providers/aws.md +++ b/docs/cloud_providers/aws.md @@ -82,7 +82,7 @@ Declare the cloud config variables for the `aws` provider as follows. Setting th | Variable | Type | Comment | |------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | aws_zone | string | Force set the AWS zone. Recommended to leave blank. | -| aws_vpc | string | The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided | +| aws_vpc | string | The AWS VPC flag enables the possibility to run the control plane components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided | | aws_subnet_id | string | SubnetID enables using a specific subnet to use for ELB's | | aws_route_table_id | string | RouteTableID enables using a specific RouteTable | | aws_role_arn | string | RoleARN is the IAM role to assume when interaction with AWS APIs | diff --git a/docs/cloud_providers/azure.md b/docs/cloud_providers/azure.md index a164ea75701..075515259d1 100644 --- a/docs/cloud_providers/azure.md +++ b/docs/cloud_providers/azure.md @@ -101,7 +101,7 @@ Sku of Load Balancer and Public IP. Candidate values are: basic and standard. ### azure\_exclude\_master\_from\_standard\_lb -azure\_exclude\_master\_from\_standard\_lb excludes master nodes from `standard` load balancer. +azure\_exclude\_master\_from\_standard\_lb excludes control plane nodes from `standard` load balancer. 
### azure\_disable\_outbound\_snat diff --git a/docs/developers/ci-setup.md b/docs/developers/ci-setup.md index ae52415c717..494ddc80d0d 100644 --- a/docs/developers/ci-setup.md +++ b/docs/developers/ci-setup.md @@ -55,7 +55,7 @@ number_of_etcd = 0 plan_etcd = "t1.small.x86" -# masters +# control planes number_of_k8s_masters = 1 number_of_k8s_masters_no_etcd = 0 diff --git a/docs/developers/vagrant.md b/docs/developers/vagrant.md index 824b3953bde..7a49a255687 100644 --- a/docs/developers/vagrant.md +++ b/docs/developers/vagrant.md @@ -126,10 +126,10 @@ The output should look like this: ```ShellSession $ kubectl get nodes -NAME STATUS ROLES AGE VERSION -kub-1 Ready control-plane,master 4m37s v1.22.5 -kub-2 Ready control-plane,master 4m7s v1.22.5 -kub-3 Ready 3m7s v1.22.5 +NAME STATUS ROLES AGE VERSION +kub-1 Ready control-plane 4m37s v1.22.5 +kub-2 Ready control-plane 4m7s v1.22.5 +kub-3 Ready 3m7s v1.22.5 ``` Another nice test is the following: diff --git a/docs/operations/cgroups.md b/docs/operations/cgroups.md index 68c7581b0fc..b7d796c0b22 100644 --- a/docs/operations/cgroups.md +++ b/docs/operations/cgroups.md @@ -30,7 +30,7 @@ kube_memory_reserved: 256Mi kube_cpu_reserved: 100m # kube_ephemeral_storage_reserved: 2Gi # kube_pid_reserved: "1000" -# Reservation for master hosts +# Reservation for control plane hosts kube_master_memory_reserved: 512Mi kube_master_cpu_reserved: 200m # kube_master_ephemeral_storage_reserved: 2Gi @@ -44,7 +44,7 @@ system_memory_reserved: 512Mi system_cpu_reserved: 500m # system_ephemeral_storage_reserved: 2Gi # system_pid_reserved: "1000" -# Reservation for master hosts +# Reservation for control plane hosts system_master_memory_reserved: 256Mi system_master_cpu_reserved: 250m # system_master_ephemeral_storage_reserved: 2Gi diff --git a/docs/operations/etcd.md b/docs/operations/etcd.md index 2efc85ce88f..ffe26eb51ae 100644 --- a/docs/operations/etcd.md +++ b/docs/operations/etcd.md @@ -14,7 +14,7 @@ Installs docker in etcd group members and runs etcd on docker containers. Only u ### Kubeadm -This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in master hosts. +This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in control plane hosts. ## Metrics diff --git a/docs/operations/upgrades.md b/docs/operations/upgrades.md index 6c915c76585..9dae0a68e15 100644 --- a/docs/operations/upgrades.md +++ b/docs/operations/upgrades.md @@ -134,10 +134,10 @@ The below example shows taking a cluster that was set up for v2.6.0 up to v2.10. ```ShellSession $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 1h v1.10.4 -boomer Ready master,node 42m v1.10.4 -caprica Ready master,node 42m v1.10.4 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 1h v1.10.4 +boomer Ready control-plane,node 42m v1.10.4 +caprica Ready control-plane,node 42m v1.10.4 $ git describe --tags v2.6.0 @@ -162,10 +162,10 @@ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml ... 
$ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 1h v1.11.3 -boomer Ready master,node 1h v1.11.3 -caprica Ready master,node 1h v1.11.3 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 1h v1.11.3 +boomer Ready control-plane,node 1h v1.11.3 +caprica Ready control-plane,node 1h v1.11.3 $ git checkout v2.8.0 Previous HEAD position was 05dabb7e Fix Bionic networking restart error #3430 (#3431) @@ -189,10 +189,10 @@ yes ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 114m v1.12.3 -boomer Ready master,node 114m v1.12.3 -caprica Ready master,node 114m v1.12.3 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 114m v1.12.3 +boomer Ready control-plane,node 114m v1.12.3 +caprica Ready control-plane,node 114m v1.12.3 $ git checkout v2.8.1 Previous HEAD position was 9051aa52 Fix ubuntu-contiv test failed (#3808) @@ -207,10 +207,10 @@ yes ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 2h36m v1.12.4 -boomer Ready master,node 2h36m v1.12.4 -caprica Ready master,node 2h36m v1.12.4 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 2h36m v1.12.4 +boomer Ready control-plane,node 2h36m v1.12.4 +caprica Ready control-plane,node 2h36m v1.12.4 $ git checkout v2.8.2 Previous HEAD position was 2ac1c756 More Feature/2.8 backports for 2.8.1 (#3911) @@ -225,10 +225,10 @@ yes ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 3h3m v1.12.5 -boomer Ready master,node 3h3m v1.12.5 -caprica Ready master,node 3h3m v1.12.5 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 3h3m v1.12.5 +boomer Ready control-plane,node 3h3m v1.12.5 +caprica Ready control-plane,node 3h3m v1.12.5 $ git checkout v2.8.3 Previous HEAD position was 4167807f Upgrade to 1.12.5 (#4066) @@ -243,10 +243,10 @@ yes ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 5h18m v1.12.5 -boomer Ready master,node 5h18m v1.12.5 -caprica Ready master,node 5h18m v1.12.5 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 5h18m v1.12.5 +boomer Ready control-plane,node 5h18m v1.12.5 +caprica Ready control-plane,node 5h18m v1.12.5 $ git checkout v2.8.4 Previous HEAD position was ea41fc5e backport cve-2019-5736 to release-2.8 (#4234) @@ -261,10 +261,10 @@ yes ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 5h37m v1.12.7 -boomer Ready master,node 5h37m v1.12.7 -caprica Ready master,node 5h37m v1.12.7 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 5h37m v1.12.7 +boomer Ready control-plane,node 5h37m v1.12.7 +caprica Ready control-plane,node 5h37m v1.12.7 $ git checkout v2.8.5 Previous HEAD position was 3901480b go to k8s 1.12.7 (#4400) @@ -279,10 +279,10 @@ yes ... 
$ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 5h45m v1.12.7 -boomer Ready master,node 5h45m v1.12.7 -caprica Ready master,node 5h45m v1.12.7 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 5h45m v1.12.7 +boomer Ready control-plane,node 5h45m v1.12.7 +caprica Ready control-plane,node 5h45m v1.12.7 $ git checkout v2.9.0 Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478) @@ -292,7 +292,7 @@ HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471) > **Warning** > IMPORTANT: Some variable formats changed in the k8s_cluster.yml between 2.8.5 and 2.9.0 -If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run. +If you do not keep your inventory copy up to date, **your upgrade will fail** and your first control plane will be left non-functional until fixed and re-run. It is at this point the cluster was upgraded from non-kubeadm to kubeadm as per the deprecation warning. @@ -302,10 +302,10 @@ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 6h54m v1.13.5 -boomer Ready master,node 6h55m v1.13.5 -caprica Ready master,node 6h54m v1.13.5 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 6h54m v1.13.5 +boomer Ready control-plane,node 6h55m v1.13.5 +caprica Ready control-plane,node 6h54m v1.13.5 # Watch out: 2.10.0 is hiding between 2.1.2 and 2.2.0 @@ -327,10 +327,10 @@ ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml ... $ kubectl get node -NAME STATUS ROLES AGE VERSION -apollo Ready master,node 7h40m v1.14.1 -boomer Ready master,node 7h40m v1.14.1 -caprica Ready master,node 7h40m v1.14.1 +NAME STATUS ROLES AGE VERSION +apollo Ready control-plane,node 7h40m v1.14.1 +boomer Ready control-plane,node 7h40m v1.14.1 +caprica Ready control-plane,node 7h40m v1.14.1 ``` @@ -395,7 +395,7 @@ Upgrade kubelet: ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=node --skip-tags=k8s-gen-certs,k8s-gen-tokens ``` -Upgrade Kubernetes master components: +Upgrade Kubernetes control plane components: ```ShellSession ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=master diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml index c7f76287002..cb201a50a5e 100644 --- a/inventory/sample/group_vars/all/all.yml +++ b/inventory/sample/group_vars/all/all.yml @@ -75,7 +75,7 @@ loadbalancer_apiserver_healthcheck_port: 8081 # skip_http_proxy_on_os_packages: false ## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all -## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## pods will restart) when adding or removing workers. 
To override this behaviour by only including control plane nodes in the ## no_proxy variable, set below to true: no_proxy_exclude_workers: false diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index 6bcdde8cbcb..56803936d12 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -272,7 +272,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" # kube_cpu_reserved: 100m # kube_ephemeral_storage_reserved: 2Gi # kube_pid_reserved: "1000" -# Reservation for master hosts +# Reservation for control plane hosts # kube_master_memory_reserved: 512Mi # kube_master_cpu_reserved: 200m # kube_master_ephemeral_storage_reserved: 2Gi @@ -287,7 +287,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" # system_memory_reserved: 512Mi # system_cpu_reserved: 500m # system_ephemeral_storage_reserved: 2Gi -## Reservation for master hosts +## Reservation for control plane hosts # system_master_memory_reserved: 256Mi # system_master_cpu_reserved: 250m # system_master_ephemeral_storage_reserved: 2Gi diff --git a/playbooks/remove_node.yml b/playbooks/remove_node.yml index e01338965e1..abfe0ecd148 100644 --- a/playbooks/remove_node.yml +++ b/playbooks/remove_node.yml @@ -33,7 +33,7 @@ - { role: remove-node/remove-etcd-node } - { role: reset, tags: reset, when: reset_nodes | default(True) | bool } -# Currently cannot remove first master or etcd +# Currently cannot remove first control plane or etcd - name: Post node removal hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}" gather_facts: no diff --git a/playbooks/upgrade_cluster.yml b/playbooks/upgrade_cluster.yml index 3180fec9310..44466fde937 100644 --- a/playbooks/upgrade_cluster.yml +++ b/playbooks/upgrade_cluster.yml @@ -38,7 +38,7 @@ - name: Install etcd import_playbook: install_etcd.yml -- name: Handle upgrades to master components first to maintain backwards compat. +- name: Handle upgrades to control plane components first to maintain backwards compat. gather_facts: False hosts: kube_control_plane any_errors_fatal: "{{ any_errors_fatal | default(true) }}" @@ -60,7 +60,7 @@ - { role: kubernetes-apps, tags: csi-driver } - { role: upgrade/post-upgrade, tags: post-upgrade } -- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes +- name: Upgrade calico and external cloud provider on all control-planes, calico-rrs, and nodes hosts: kube_control_plane:calico_rr:kube_node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml index 3517686cb7e..39011f7cd44 100644 --- a/roles/kubernetes-apps/metrics_server/tasks/main.yml +++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -1,6 +1,6 @@ --- -# If all masters have node role, there are no tainted master and toleration should not be specified. -- name: Check all masters are node or not +# If all control planes also have the node role, there are no tainted control planes and the toleration should not be specified. 
+- name: Check all control planes are node or not set_fact: masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml index df92c419be7..9c5562e1c25 100644 --- a/roles/kubernetes/control-plane/defaults/main/main.yml +++ b/roles/kubernetes/control-plane/defaults/main/main.yml @@ -5,7 +5,7 @@ upgrade_cluster_setup: false # By default the external API listens on all interfaces, this can be changed to # listen on a specific address/interface. # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost -# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too. +# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control planes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too. kube_apiserver_bind_address: 0.0.0.0 # A port range to reserve for services with NodePort visibility. @@ -38,7 +38,7 @@ kube_controller_manager_leader_elect_renew_deadline: 10s # discovery_timeout modifies the discovery timeout discovery_timeout: 5m0s -# Instruct first master to refresh kubeadm token +# Instruct first control plane to refresh kubeadm token kubeadm_refresh_token: true # Scale down coredns replicas to 0 if not using coredns dns_mode diff --git a/roles/kubernetes/control-plane/handlers/main.yml b/roles/kubernetes/control-plane/handlers/main.yml index be5fdffb125..31d98b4e9c3 100644 --- a/roles/kubernetes/control-plane/handlers/main.yml +++ b/roles/kubernetes/control-plane/handlers/main.yml @@ -1,16 +1,16 @@ --- -- name: Master | reload systemd +- name: Control-Plane | reload systemd systemd_service: daemon_reload: true - listen: Master | restart kubelet + listen: Control-Plane | restart kubelet -- name: Master | reload kubelet +- name: Control-Plane | reload kubelet service: name: kubelet state: restarted - listen: Master | restart kubelet + listen: Control-Plane | restart kubelet -- name: Master | Remove apiserver container docker +- name: Control-Plane | Remove apiserver container docker shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f" args: executable: /bin/bash @@ -19,9 +19,9 @@ until: remove_apiserver_container.rc == 0 delay: 1 when: container_manager == "docker" - listen: Master | Restart apiserver + listen: Control-Plane | Restart apiserver -- name: Master | Remove apiserver container containerd/crio +- name: Control-Plane | Remove apiserver container containerd/crio shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" args: executable: /bin/bash @@ -30,9 +30,9 @@ until: remove_apiserver_container.rc == 0 delay: 1 when: container_manager in ['containerd', 'crio'] - listen: Master | Restart apiserver + listen: Control-Plane | Restart apiserver -- name: Master | Remove scheduler container docker +- name: Control-Plane | Remove scheduler container docker shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" args: executable: /bin/bash @@ -41,9 +41,9 @@ until: remove_scheduler_container.rc == 0 delay: 1 when: container_manager == "docker" - listen: Master | Restart 
kube-scheduler + listen: Control-Plane | Restart kube-scheduler -- name: Master | Remove scheduler container containerd/crio +- name: Control-Plane | Remove scheduler container containerd/crio shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" args: executable: /bin/bash @@ -52,9 +52,9 @@ until: remove_scheduler_container.rc == 0 delay: 1 when: container_manager in ['containerd', 'crio'] - listen: Master | Restart kube-scheduler + listen: Control-Plane | Restart kube-scheduler -- name: Master | Remove controller manager container docker +- name: Control-Plane | Remove controller manager container docker shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f" args: executable: /bin/bash @@ -63,9 +63,9 @@ until: remove_cm_container.rc == 0 delay: 1 when: container_manager == "docker" - listen: Master | Restart kube-controller-manager + listen: Control-Plane | Restart kube-controller-manager -- name: Master | Remove controller manager container containerd/crio +- name: Control-Plane | Remove controller manager container containerd/crio shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'" args: executable: /bin/bash @@ -74,9 +74,9 @@ until: remove_cm_container.rc == 0 delay: 1 when: container_manager in ['containerd', 'crio'] - listen: Master | Restart kube-controller-manager + listen: Control-Plane | Restart kube-controller-manager -- name: Master | wait for kube-scheduler +- name: Control-Plane | wait for kube-scheduler vars: endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}" uri: @@ -87,10 +87,10 @@ retries: 60 delay: 1 listen: - - Master | restart kubelet - - Master | Restart kube-scheduler + - Control-Plane | restart kubelet + - Control-Plane | Restart kube-scheduler -- name: Master | wait for kube-controller-manager +- name: Control-Plane | wait for kube-controller-manager vars: endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}" uri: @@ -101,10 +101,10 @@ retries: 60 delay: 1 listen: - - Master | restart kubelet - - Master | Restart kube-controller-manager + - Control-Plane | restart kubelet + - Control-Plane | Restart kube-controller-manager -- name: Master | wait for the apiserver to be running +- name: Control-Plane | wait for the apiserver to be running uri: url: "{{ kube_apiserver_endpoint }}/healthz" validate_certs: no @@ -113,5 +113,5 @@ retries: 60 delay: 1 listen: - - Master | restart kubelet - - Master | Restart apiserver + - Control-Plane | restart kubelet + - Control-Plane | Restart apiserver diff --git a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml index 9b998c52bc7..480c2cdbeda 100644 --- a/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml +++ b/roles/kubernetes/control-plane/tasks/encrypt-at-rest.yml @@ -23,7 +23,7 @@ kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}" when: secrets_encryption_file.stat.exists -- name: Set kube_encrypt_token across master nodes +- name: Set kube_encrypt_token across control plane nodes set_fact: 
kube_encrypt_token: "{{ kube_encrypt_token_extracted }}" delegate_to: "{{ item }}" diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml index 5376aba81e6..6c29b99960b 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-fix-apiserver.yml @@ -12,6 +12,6 @@ - kubelet.conf - scheduler.conf notify: - - "Master | Restart kube-controller-manager" - - "Master | Restart kube-scheduler" - - "Master | reload kubelet" + - "Control-Plane | Restart kube-controller-manager" + - "Control-Plane | Restart kube-scheduler" + - "Control-Plane | reload kubelet" diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index dfbe604a4c8..988663cc52b 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -189,7 +189,7 @@ mode: "0644" when: kubeadm_patches is defined and kubeadm_patches.enabled -- name: Kubeadm | Initialize first master +- name: Kubeadm | Initialize first control plane command: >- timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }} {{ bin_dir }}/kubeadm init @@ -205,7 +205,7 @@ failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr environment: PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" - notify: Master | restart kubelet + notify: Control-Plane | restart kubelet - name: Set kubeadm certificate key set_fact: @@ -250,7 +250,7 @@ tags: - kubeadm_token -- name: Kubeadm | Join other masters +- name: Kubeadm | Join other control planes include_tasks: kubeadm-secondary.yml - name: Kubeadm | upgrade kubernetes cluster @@ -260,7 +260,7 @@ - kubeadm_already_run.stat.exists # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file. 
-- name: Kubeadm | Remove taint for master with node role +- name: Kubeadm | Remove taint for control plane with node role command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}" delegate_to: "{{ first_kube_control_plane }}" with_items: diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml index 7638a896864..80ed2cc8c16 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -9,7 +9,7 @@ delay: 5 until: _result.status == 200 -- name: Kubeadm | Upgrade first master +- name: Kubeadm | Upgrade first control plane command: >- timeout -k 600s 600s {{ bin_dir }}/kubeadm @@ -29,9 +29,9 @@ failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr environment: PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" - notify: Master | restart kubelet + notify: Control-Plane | restart kubelet -- name: Kubeadm | Upgrade other masters +- name: Kubeadm | Upgrade other control planes command: >- timeout -k 600s 600s {{ bin_dir }}/kubeadm @@ -51,7 +51,7 @@ failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr environment: PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}" - notify: Master | restart kubelet + notify: Control-Plane | restart kubelet - name: Kubeadm | Remove binding to anonymous user command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found" diff --git a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml index 7d0c1a0d59e..68bc36229c9 100644 --- a/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml +++ b/roles/kubernetes/control-plane/tasks/kubelet-fix-client-cert-rotation.yml @@ -6,7 +6,7 @@ line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem' backup: yes notify: - - "Master | reload kubelet" + - "Control-Plane | reload kubelet" - name: Fixup kubelet client cert rotation 2/2 lineinfile: @@ -15,4 +15,4 @@ line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem' backup: yes notify: - - "Master | reload kubelet" + - "Control-Plane | reload kubelet" diff --git a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml index 2d7dce5bd0d..3ccf9988f4c 100644 --- a/roles/kubernetes/control-plane/tasks/pre-upgrade.yml +++ b/roles/kubernetes/control-plane/tasks/pre-upgrade.yml @@ -1,5 +1,5 @@ --- -- name: "Pre-upgrade | Delete master manifests if etcd secrets changed" +- name: "Pre-upgrade | Delete control plane manifests if etcd secrets changed" file: path: "/etc/kubernetes/manifests/{{ item }}.manifest" state: absent @@ -8,7 +8,7 @@ register: kube_apiserver_manifest_replaced when: etcd_secret_changed | default(false) -- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler +- name: "Pre-upgrade | Delete control plane containers forcefully" # noqa no-handler shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f" args: executable: /bin/bash diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index 2b5778726aa..e754329bf83 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -169,8 +169,8 @@ tags: - kube-proxy -# FIXME(mattymo): Need to point to localhost, 
-# incorrectly to first master, creating SPoF.
+# FIXME(mattymo): Need to point to localhost, otherwise control planes will all point
+# incorrectly to the first control plane, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
     set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 7b8438e9bba..cf6a6d64713 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -42,7 +42,7 @@ kube_memory_reserved: 256Mi
 kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 kube_master_memory_reserved: 512Mi
 kube_master_cpu_reserved: 200m
 # kube_master_ephemeral_storage_reserved: 2Gi
@@ -56,7 +56,7 @@ system_memory_reserved: 512Mi
 system_cpu_reserved: 500m
 # system_ephemeral_storage_reserved: 2Gi
 # system_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 system_master_memory_reserved: 256Mi
 system_master_cpu_reserved: 250m
 # system_master_ephemeral_storage_reserved: 2Gi
@@ -136,7 +136,7 @@ kubelet_config_extra_args_cgroupfs:
   systemCgroups: /system.slice
   cgroupRoot: /

-## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
+## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control planes
 kubelet_node_config_extra_args: {}

 # Maximum number of container log files that can be present for a container.
@@ -148,7 +148,7 @@ kubelet_logfiles_max_size: 10Mi
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []

-## Support custom flags to be passed to kubelet only on nodes, not masters
+## Support custom flags to be passed to kubelet only on nodes, not control planes
 kubelet_node_custom_flags: []

 # If non-empty, will use this string as identification instead of the actual hostname
@@ -216,7 +216,7 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default(''
 # azure_vmtype: standard
 # Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
 azure_loadbalancer_sku: basic
-# excludes master nodes from standard load balancer.
+# excludes control plane nodes from standard load balancer.
 azure_exclude_master_from_standard_lb: true
 # disables the outbound SNAT for public load balancer rules
 azure_disable_outbound_snat: false
diff --git a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
index 1bb0f4856f4..a7e0cb9b037 100644
--- a/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0040-verify-settings.yml
@@ -60,7 +60,7 @@
     - not ignore_assert_errors
     - inventory_hostname in groups.get('etcd',[])

-- name: Stop if memory is too small for masters
+- name: Stop if memory is too small for control planes
   assert:
     that: ansible_memtotal_mb >= minimal_master_memory_mb
   when:
diff --git a/roles/kubernetes/tokens/tasks/check-tokens.yml b/roles/kubernetes/tokens/tasks/check-tokens.yml
index a157a0597ee..24ddb59976d 100644
--- a/roles/kubernetes/tokens/tasks/check-tokens.yml
+++ b/roles/kubernetes/tokens/tasks/check-tokens.yml
@@ -1,5 +1,5 @@
 ---
-- name: "Check_tokens | check if the tokens have already been generated on first master"
+- name: "Check_tokens | check if the tokens have already been generated on first control plane"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
     get_attributes: no
diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml
index 1dabf965755..47f42777355 100644
--- a/roles/kubernetes/tokens/tasks/gen_tokens.yml
+++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml
@@ -8,7 +8,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)

-- name: Gen_tokens | generate tokens for master components
+- name: Gen_tokens | generate tokens for control plane components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
   environment:
     TOKEN_DIR: "{{ kube_token_dir }}"
@@ -34,7 +34,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)

-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first control plane
   command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
   register: tokens_list
   check_mode: no
@@ -52,7 +52,7 @@
   run_once: true
   when: sync_tokens | default(false)

-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on control planes
   shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
   args:
     executable: /bin/bash
diff --git a/roles/kubespray-defaults/defaults/main/main.yml b/roles/kubespray-defaults/defaults/main/main.yml
index b6899111111..1a352a4596c 100644
--- a/roles/kubespray-defaults/defaults/main/main.yml
+++ b/roles/kubespray-defaults/defaults/main/main.yml
@@ -243,7 +243,7 @@ kube_network_node_prefix_ipv6: 120
 kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"

 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control planes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0

 # https
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 6642ef2f689..0973bb7b068 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -1,7 +1,7 @@
 ---
 # This manifest installs the calico/node container, as well
 # as the Calico CNI plugins and network config on
-# each master and worker node in a Kubernetes cluster.
+# each control plane and worker node in a Kubernetes cluster.
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
diff --git a/roles/network_plugin/kube-router/defaults/main.yml b/roles/network_plugin/kube-router/defaults/main.yml
index c01a3532bd8..00770f6505c 100644
--- a/roles/network_plugin/kube-router/defaults/main.yml
+++ b/roles/network_plugin/kube-router/defaults/main.yml
@@ -50,7 +50,7 @@ kube_router_dns_policy: ClusterFirstWithHostNet

 # Adds annotations to kubernetes nodes for advanced configuration of BGP Peers.
 # https://github.com/cloudnativelabs/kube-router/blob/master/docs/bgp.md
-# Array of annotations for master
+# Array of annotations for control plane
 kube_router_annotations_master: []

 # Array of annotations for every node
diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index bc8bfd6d623..473e49f55ee 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -7,7 +7,7 @@
     # ignore servers that are not nodes
     - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   retries: "{{ delete_node_retries }}"
-  # Sometimes the api-server can have a short window of indisponibility when we delete a master node
+  # Sometimes the api-server can have a short window of unavailability when we delete a control plane node
   delay: "{{ delete_node_delay_seconds }}"
   register: result
   until: result is not failed
diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh
index a1c09be66ce..4e7724030c6 100755
--- a/tests/scripts/testcases_run.sh
+++ b/tests/scripts/testcases_run.sh
@@ -118,7 +118,7 @@ EOF
 fi

 # Tests Cases
-## Test Master API
+## Test Control Plane API
 run_playbook tests/testcases/010_check-apiserver.yml
 run_playbook tests/testcases/015_check-nodes-ready.yml
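
A rough manual equivalent of the renamed "Test Control Plane API" step above, for spot-checking a cluster by hand. This is a sketch only, not what 010_check-apiserver.yml actually runs, and it assumes kubectl is already configured against the target cluster:

# Sketch: confirm the API server answers and that nodes carry the control-plane role.
kubectl get --raw='/readyz?verbose'                          # aggregated readiness of the control plane components
kubectl get nodes -l node-role.kubernetes.io/control-plane   # should list every control plane node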