From d6c2d050593e92e796d84679be6150f4cd523237 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 15 Aug 2023 15:24:51 -0400 Subject: [PATCH 01/54] canonicized pages-for-subheaders --- docs/pages-for-subheaders/about-provisioning-drivers.md | 4 ++++ docs/pages-for-subheaders/about-rke1-templates.md | 4 ++++ docs/pages-for-subheaders/about-the-api.md | 4 ++++ docs/pages-for-subheaders/access-clusters.md | 4 ++++ docs/pages-for-subheaders/advanced-configuration.md | 4 ++++ docs/pages-for-subheaders/advanced-user-guides.md | 4 ++++ docs/pages-for-subheaders/air-gapped-helm-cli-install.md | 4 ++++ docs/pages-for-subheaders/authentication-config.md | 4 ++++ .../authentication-permissions-and-global-configuration.md | 4 ++++ docs/pages-for-subheaders/aws-cloud-marketplace.md | 4 ++++ .../backup-restore-and-disaster-recovery.md | 4 ++++ docs/pages-for-subheaders/backup-restore-configuration.md | 4 ++++ docs/pages-for-subheaders/best-practices.md | 4 ++++ .../checklist-for-production-ready-clusters.md | 4 ++++ docs/pages-for-subheaders/cis-scan-guides.md | 4 ++++ docs/pages-for-subheaders/cis-scans.md | 4 ++++ docs/pages-for-subheaders/cli-with-rancher.md | 4 ++++ docs/pages-for-subheaders/cloud-marketplace.md | 4 ++++ docs/pages-for-subheaders/cluster-configuration.md | 4 ++++ docs/pages-for-subheaders/configuration-options.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 4 ++++ docs/pages-for-subheaders/configure-openldap.md | 4 ++++ .../create-kubernetes-persistent-storage.md | 4 ++++ docs/pages-for-subheaders/custom-resource-configuration.md | 4 ++++ docs/pages-for-subheaders/deploy-apps-across-clusters.md | 4 ++++ docs/pages-for-subheaders/deploy-rancher-manager.md | 4 ++++ docs/pages-for-subheaders/deploy-rancher-workloads.md | 4 ++++ docs/pages-for-subheaders/downstream-cluster-configuration.md | 4 ++++ docs/pages-for-subheaders/enable-experimental-features.md | 4 ++++ docs/pages-for-subheaders/gke-cluster-configuration.md | 4 ++++ docs/pages-for-subheaders/helm-charts-in-rancher.md | 4 ++++ docs/pages-for-subheaders/horizontal-pod-autoscaler.md | 4 ++++ docs/pages-for-subheaders/infrastructure-setup.md | 4 ++++ docs/pages-for-subheaders/install-cluster-autoscaler.md | 4 ++++ .../install-upgrade-on-a-kubernetes-cluster.md | 4 ++++ docs/pages-for-subheaders/installation-and-upgrade.md | 4 ++++ docs/pages-for-subheaders/installation-references.md | 4 ++++ docs/pages-for-subheaders/installation-requirements.md | 4 ++++ docs/pages-for-subheaders/istio-setup-guide.md | 4 ++++ docs/pages-for-subheaders/istio.md | 4 ++++ docs/pages-for-subheaders/k3s-hardening-guide.md | 4 ++++ docs/pages-for-subheaders/kubernetes-cluster-setup.md | 4 ++++ .../kubernetes-clusters-in-rancher-setup.md | 4 ++++ docs/pages-for-subheaders/kubernetes-components.md | 4 ++++ docs/pages-for-subheaders/kubernetes-resources-setup.md | 4 ++++ docs/pages-for-subheaders/launch-kubernetes-with-rancher.md | 4 ++++ .../load-balancer-and-ingress-controller.md | 4 ++++ docs/pages-for-subheaders/logging.md | 4 ++++ docs/pages-for-subheaders/machine-configuration.md | 4 ++++ docs/pages-for-subheaders/manage-clusters.md | 4 ++++ docs/pages-for-subheaders/manage-project-resource-quotas.md | 4 ++++ docs/pages-for-subheaders/manage-projects.md | 4 ++++ .../manage-role-based-access-control-rbac.md | 4 ++++ docs/pages-for-subheaders/monitoring-alerting-guides.md | 4 ++++ docs/pages-for-subheaders/monitoring-and-alerting.md | 4 ++++ .../monitoring-v2-configuration-guides.md | 4 ++++ 
docs/pages-for-subheaders/monitoring-v2-configuration.md | 4 ++++ docs/pages-for-subheaders/new-user-guides.md | 4 ++++ docs/pages-for-subheaders/node-template-configuration.md | 4 ++++ docs/pages-for-subheaders/nutanix.md | 4 ++++ docs/pages-for-subheaders/other-installation-methods.md | 4 ++++ docs/pages-for-subheaders/prometheus-federator-guides.md | 4 ++++ docs/pages-for-subheaders/prometheus-federator.md | 4 ++++ docs/pages-for-subheaders/provisioning-storage-examples.md | 4 ++++ docs/pages-for-subheaders/quick-start-guides.md | 4 ++++ docs/pages-for-subheaders/rancher-behind-an-http-proxy.md | 4 ++++ docs/pages-for-subheaders/rancher-hardening-guides.md | 4 ++++ docs/pages-for-subheaders/rancher-managed-clusters.md | 4 ++++ docs/pages-for-subheaders/rancher-manager-architecture.md | 4 ++++ .../rancher-on-a-single-node-with-docker.md | 4 ++++ docs/pages-for-subheaders/rancher-security.md | 4 ++++ docs/pages-for-subheaders/rancher-server-configuration.md | 4 ++++ docs/pages-for-subheaders/rancher-server.md | 4 ++++ docs/pages-for-subheaders/resources.md | 4 ++++ docs/pages-for-subheaders/rke1-hardening-guide.md | 4 ++++ docs/pages-for-subheaders/rke2-hardening-guide.md | 4 ++++ docs/pages-for-subheaders/selinux-rpm.md | 4 ++++ docs/pages-for-subheaders/set-up-cloud-providers.md | 4 ++++ .../set-up-clusters-from-hosted-kubernetes-providers.md | 4 ++++ docs/pages-for-subheaders/single-node-rancher-in-docker.md | 4 ++++ docs/pages-for-subheaders/use-existing-nodes.md | 4 ++++ .../use-new-nodes-in-an-infra-provider.md | 4 ++++ docs/pages-for-subheaders/use-windows-clusters.md | 4 ++++ docs/pages-for-subheaders/user-settings.md | 4 ++++ docs/pages-for-subheaders/vsphere.md | 4 ++++ docs/pages-for-subheaders/workloads-and-pods.md | 4 ++++ .../pages-for-subheaders/about-provisioning-drivers.md | 4 ++++ .../pages-for-subheaders/about-rke1-templates.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/about-the-api.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/access-clusters.md | 4 ++++ .../pages-for-subheaders/advanced-user-guides.md | 4 ++++ .../pages-for-subheaders/air-gapped-helm-cli-install.md | 4 ++++ .../pages-for-subheaders/authentication-config.md | 4 ++++ .../authentication-permissions-and-global-configuration.md | 4 ++++ .../backup-restore-and-disaster-recovery.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/best-practices.md | 4 ++++ .../checklist-for-production-ready-clusters.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/cis-scans.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md | 4 ++++ .../pages-for-subheaders/cluster-configuration.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 4 ++++ .../pages-for-subheaders/configure-openldap.md | 4 ++++ .../create-kubernetes-persistent-storage.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-manager.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-workloads.md | 4 ++++ .../pages-for-subheaders/downstream-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/enable-experimental-features.md | 4 ++++ .../pages-for-subheaders/helm-charts-in-rancher.md | 4 ++++ .../pages-for-subheaders/horizontal-pod-autoscaler.md | 4 ++++ .../pages-for-subheaders/infrastructure-setup.md | 4 ++++ .../pages-for-subheaders/install-cluster-autoscaler.md | 4 ++++ .../install-upgrade-on-a-kubernetes-cluster.md | 4 ++++ .../pages-for-subheaders/installation-and-upgrade.md | 4 ++++ 
.../pages-for-subheaders/installation-references.md | 4 ++++ .../pages-for-subheaders/installation-requirements.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md | 4 ++++ versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md | 4 ++++ .../pages-for-subheaders/kubernetes-cluster-setup.md | 4 ++++ .../kubernetes-clusters-in-rancher-setup.md | 4 ++++ .../pages-for-subheaders/kubernetes-components.md | 4 ++++ .../pages-for-subheaders/kubernetes-resources-setup.md | 4 ++++ .../pages-for-subheaders/launch-kubernetes-with-rancher.md | 4 ++++ .../load-balancer-and-ingress-controller.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/manage-clusters.md | 4 ++++ .../pages-for-subheaders/manage-project-resource-quotas.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/manage-projects.md | 4 ++++ .../manage-role-based-access-control-rbac.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/new-user-guides.md | 4 ++++ .../pages-for-subheaders/node-template-configuration.md | 4 ++++ .../pages-for-subheaders/other-installation-methods.md | 4 ++++ .../pages-for-subheaders/provisioning-storage-examples.md | 4 ++++ .../pages-for-subheaders/quick-start-guides.md | 4 ++++ .../pages-for-subheaders/rancher-behind-an-http-proxy.md | 4 ++++ .../pages-for-subheaders/rancher-manager-architecture.md | 4 ++++ .../rancher-on-a-single-node-with-docker.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/rancher-security.md | 4 ++++ .../pages-for-subheaders/rancher-server-configuration.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/resources.md | 4 ++++ .../pages-for-subheaders/set-up-cloud-providers.md | 4 ++++ .../set-up-clusters-from-hosted-kubernetes-providers.md | 4 ++++ .../pages-for-subheaders/single-node-rancher-in-docker.md | 4 ++++ .../pages-for-subheaders/use-existing-nodes.md | 4 ++++ .../use-new-nodes-in-an-infra-provider.md | 4 ++++ .../pages-for-subheaders/use-windows-clusters.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/user-settings.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/vsphere.md | 4 ++++ .../pages-for-subheaders/workloads-and-pods.md | 4 ++++ .../pages-for-subheaders/about-provisioning-drivers.md | 4 ++++ .../version-2.5/pages-for-subheaders/about-rke1-templates.md | 4 ++++ .../version-2.5/pages-for-subheaders/about-the-api.md | 4 ++++ .../version-2.5/pages-for-subheaders/access-clusters.md | 4 ++++ .../pages-for-subheaders/advanced-configuration.md | 4 ++++ .../version-2.5/pages-for-subheaders/advanced-user-guides.md | 4 ++++ .../pages-for-subheaders/air-gapped-helm-cli-install.md | 4 ++++ .../version-2.5/pages-for-subheaders/authentication-config.md | 4 ++++ .../authentication-permissions-and-global-configuration.md | 4 ++++ .../backup-restore-and-disaster-recovery.md | 4 ++++ .../pages-for-subheaders/backup-restore-configuration.md | 4 ++++ .../version-2.5/pages-for-subheaders/best-practices.md | 4 ++++ .../checklist-for-production-ready-clusters.md | 4 ++++ .../version-2.5/pages-for-subheaders/cis-scan-guides.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md | 4 ++++ .../version-2.5/pages-for-subheaders/cli-with-rancher.md | 4 ++++ .../version-2.5/pages-for-subheaders/cluster-configuration.md | 4 ++++ .../version-2.5/pages-for-subheaders/configuration-options.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 4 ++++ .../version-2.5/pages-for-subheaders/configure-openldap.md | 4 ++++ .../create-kubernetes-persistent-storage.md | 4 ++++ .../pages-for-subheaders/custom-resource-configuration.md | 4 
++++ .../pages-for-subheaders/deploy-apps-across-clusters.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-manager.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-workloads.md | 4 ++++ .../pages-for-subheaders/downstream-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/enable-experimental-features.md | 4 ++++ .../pages-for-subheaders/gke-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/helm-charts-in-rancher.md | 4 ++++ .../pages-for-subheaders/horizontal-pod-autoscaler.md | 4 ++++ .../version-2.5/pages-for-subheaders/infrastructure-setup.md | 4 ++++ .../pages-for-subheaders/install-cluster-autoscaler.md | 4 ++++ .../install-upgrade-on-a-kubernetes-cluster.md | 4 ++++ .../pages-for-subheaders/installation-and-upgrade.md | 4 ++++ .../pages-for-subheaders/installation-references.md | 4 ++++ .../pages-for-subheaders/installation-requirements.md | 4 ++++ .../version-2.5/pages-for-subheaders/istio-setup-guide.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/istio.md | 4 ++++ .../pages-for-subheaders/kubernetes-cluster-setup.md | 4 ++++ .../kubernetes-clusters-in-rancher-setup.md | 4 ++++ .../version-2.5/pages-for-subheaders/kubernetes-components.md | 4 ++++ .../pages-for-subheaders/kubernetes-resources-setup.md | 4 ++++ .../pages-for-subheaders/launch-kubernetes-with-rancher.md | 4 ++++ .../load-balancer-and-ingress-controller.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/logging.md | 4 ++++ .../version-2.5/pages-for-subheaders/manage-clusters.md | 4 ++++ .../pages-for-subheaders/manage-project-resource-quotas.md | 4 ++++ .../version-2.5/pages-for-subheaders/manage-projects.md | 4 ++++ .../manage-role-based-access-control-rbac.md | 4 ++++ .../pages-for-subheaders/monitoring-alerting-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-and-alerting.md | 4 ++++ .../monitoring-v2-configuration-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-v2-configuration.md | 4 ++++ .../version-2.5/pages-for-subheaders/new-user-guides.md | 4 ++++ .../pages-for-subheaders/node-template-configuration.md | 4 ++++ .../pages-for-subheaders/other-installation-methods.md | 4 ++++ .../pages-for-subheaders/provisioning-storage-examples.md | 4 ++++ .../version-2.5/pages-for-subheaders/quick-start-guides.md | 4 ++++ .../pages-for-subheaders/rancher-behind-an-http-proxy.md | 4 ++++ .../pages-for-subheaders/rancher-managed-clusters.md | 4 ++++ .../pages-for-subheaders/rancher-manager-architecture.md | 4 ++++ .../rancher-on-a-single-node-with-docker.md | 4 ++++ .../version-2.5/pages-for-subheaders/rancher-security.md | 4 ++++ .../pages-for-subheaders/rancher-server-configuration.md | 4 ++++ .../version-2.5/pages-for-subheaders/rancher-server.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/resources.md | 4 ++++ .../version-2.5/pages-for-subheaders/selinux-rpm.md | 4 ++++ .../pages-for-subheaders/set-up-cloud-providers.md | 4 ++++ .../set-up-clusters-from-hosted-kubernetes-providers.md | 4 ++++ .../pages-for-subheaders/single-node-rancher-in-docker.md | 4 ++++ .../version-2.5/pages-for-subheaders/use-existing-nodes.md | 4 ++++ .../use-new-nodes-in-an-infra-provider.md | 4 ++++ .../version-2.5/pages-for-subheaders/use-windows-clusters.md | 4 ++++ .../version-2.5/pages-for-subheaders/user-settings.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/vsphere.md | 4 ++++ .../version-2.5/pages-for-subheaders/workloads-and-pods.md | 4 ++++ .../pages-for-subheaders/about-provisioning-drivers.md | 4 ++++ 
.../version-2.6/pages-for-subheaders/about-rke1-templates.md | 4 ++++ .../version-2.6/pages-for-subheaders/about-the-api.md | 4 ++++ .../version-2.6/pages-for-subheaders/access-clusters.md | 4 ++++ .../pages-for-subheaders/advanced-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/advanced-user-guides.md | 4 ++++ .../pages-for-subheaders/air-gapped-helm-cli-install.md | 4 ++++ .../version-2.6/pages-for-subheaders/authentication-config.md | 4 ++++ .../authentication-permissions-and-global-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/aws-cloud-marketplace.md | 4 ++++ .../backup-restore-and-disaster-recovery.md | 4 ++++ .../pages-for-subheaders/backup-restore-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/best-practices.md | 4 ++++ .../checklist-for-production-ready-clusters.md | 4 ++++ .../version-2.6/pages-for-subheaders/cis-scan-guides.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/cis-scans.md | 4 ++++ .../version-2.6/pages-for-subheaders/cli-with-rancher.md | 4 ++++ .../version-2.6/pages-for-subheaders/cloud-marketplace.md | 4 ++++ .../version-2.6/pages-for-subheaders/cluster-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/configuration-options.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 4 ++++ .../version-2.6/pages-for-subheaders/configure-openldap.md | 4 ++++ .../create-kubernetes-persistent-storage.md | 4 ++++ .../pages-for-subheaders/custom-resource-configuration.md | 4 ++++ .../pages-for-subheaders/deploy-apps-across-clusters.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-manager.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-workloads.md | 4 ++++ .../pages-for-subheaders/downstream-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/enable-experimental-features.md | 4 ++++ .../pages-for-subheaders/gke-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/helm-charts-in-rancher.md | 4 ++++ .../pages-for-subheaders/horizontal-pod-autoscaler.md | 4 ++++ .../version-2.6/pages-for-subheaders/infrastructure-setup.md | 4 ++++ .../pages-for-subheaders/install-cluster-autoscaler.md | 4 ++++ .../install-upgrade-on-a-kubernetes-cluster.md | 4 ++++ .../pages-for-subheaders/installation-and-upgrade.md | 4 ++++ .../pages-for-subheaders/installation-references.md | 4 ++++ .../pages-for-subheaders/installation-requirements.md | 4 ++++ .../version-2.6/pages-for-subheaders/istio-setup-guide.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/istio.md | 4 ++++ .../pages-for-subheaders/kubernetes-cluster-setup.md | 4 ++++ .../kubernetes-clusters-in-rancher-setup.md | 4 ++++ .../version-2.6/pages-for-subheaders/kubernetes-components.md | 4 ++++ .../pages-for-subheaders/kubernetes-resources-setup.md | 4 ++++ .../pages-for-subheaders/launch-kubernetes-with-rancher.md | 4 ++++ .../load-balancer-and-ingress-controller.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/logging.md | 4 ++++ .../version-2.6/pages-for-subheaders/machine-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/manage-clusters.md | 4 ++++ .../pages-for-subheaders/manage-project-resource-quotas.md | 4 ++++ .../version-2.6/pages-for-subheaders/manage-projects.md | 4 ++++ .../manage-role-based-access-control-rbac.md | 4 ++++ .../pages-for-subheaders/monitoring-alerting-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-and-alerting.md | 4 ++++ .../monitoring-v2-configuration-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-v2-configuration.md | 4 ++++ 
.../version-2.6/pages-for-subheaders/new-user-guides.md | 4 ++++ .../pages-for-subheaders/node-template-configuration.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/nutanix.md | 4 ++++ .../pages-for-subheaders/other-installation-methods.md | 4 ++++ .../pages-for-subheaders/prometheus-federator-guides.md | 4 ++++ .../version-2.6/pages-for-subheaders/prometheus-federator.md | 4 ++++ .../pages-for-subheaders/provisioning-storage-examples.md | 4 ++++ .../version-2.6/pages-for-subheaders/quick-start-guides.md | 4 ++++ .../pages-for-subheaders/rancher-behind-an-http-proxy.md | 4 ++++ .../pages-for-subheaders/rancher-managed-clusters.md | 4 ++++ .../pages-for-subheaders/rancher-manager-architecture.md | 4 ++++ .../rancher-on-a-single-node-with-docker.md | 4 ++++ .../version-2.6/pages-for-subheaders/rancher-security.md | 4 ++++ .../pages-for-subheaders/rancher-server-configuration.md | 4 ++++ .../version-2.6/pages-for-subheaders/rancher-server.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/resources.md | 4 ++++ .../version-2.6/pages-for-subheaders/selinux-rpm.md | 4 ++++ .../pages-for-subheaders/set-up-cloud-providers.md | 4 ++++ .../set-up-clusters-from-hosted-kubernetes-providers.md | 4 ++++ .../pages-for-subheaders/single-node-rancher-in-docker.md | 4 ++++ .../version-2.6/pages-for-subheaders/use-existing-nodes.md | 4 ++++ .../use-new-nodes-in-an-infra-provider.md | 4 ++++ .../version-2.6/pages-for-subheaders/use-windows-clusters.md | 4 ++++ .../version-2.6/pages-for-subheaders/user-settings.md | 4 ++++ versioned_docs/version-2.6/pages-for-subheaders/vsphere.md | 4 ++++ .../version-2.6/pages-for-subheaders/workloads-and-pods.md | 4 ++++ .../pages-for-subheaders/about-provisioning-drivers.md | 4 ++++ .../version-2.7/pages-for-subheaders/about-rke1-templates.md | 4 ++++ .../version-2.7/pages-for-subheaders/about-the-api.md | 4 ++++ .../version-2.7/pages-for-subheaders/access-clusters.md | 4 ++++ .../pages-for-subheaders/advanced-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/advanced-user-guides.md | 4 ++++ .../pages-for-subheaders/air-gapped-helm-cli-install.md | 4 ++++ .../version-2.7/pages-for-subheaders/authentication-config.md | 4 ++++ .../authentication-permissions-and-global-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/aws-cloud-marketplace.md | 4 ++++ .../backup-restore-and-disaster-recovery.md | 4 ++++ .../pages-for-subheaders/backup-restore-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/best-practices.md | 4 ++++ .../checklist-for-production-ready-clusters.md | 4 ++++ .../version-2.7/pages-for-subheaders/cis-scan-guides.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/cis-scans.md | 4 ++++ .../version-2.7/pages-for-subheaders/cli-with-rancher.md | 4 ++++ .../version-2.7/pages-for-subheaders/cloud-marketplace.md | 4 ++++ .../version-2.7/pages-for-subheaders/cluster-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/configuration-options.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 4 ++++ .../version-2.7/pages-for-subheaders/configure-openldap.md | 4 ++++ .../create-kubernetes-persistent-storage.md | 4 ++++ .../pages-for-subheaders/custom-resource-configuration.md | 4 ++++ .../pages-for-subheaders/deploy-apps-across-clusters.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-manager.md | 4 ++++ .../pages-for-subheaders/deploy-rancher-workloads.md | 4 ++++ .../pages-for-subheaders/downstream-cluster-configuration.md | 4 ++++ 
.../pages-for-subheaders/enable-experimental-features.md | 4 ++++ .../pages-for-subheaders/gke-cluster-configuration.md | 4 ++++ .../pages-for-subheaders/helm-charts-in-rancher.md | 4 ++++ .../pages-for-subheaders/horizontal-pod-autoscaler.md | 4 ++++ .../version-2.7/pages-for-subheaders/infrastructure-setup.md | 4 ++++ .../pages-for-subheaders/install-cluster-autoscaler.md | 4 ++++ .../install-upgrade-on-a-kubernetes-cluster.md | 4 ++++ .../pages-for-subheaders/installation-and-upgrade.md | 4 ++++ .../pages-for-subheaders/installation-references.md | 4 ++++ .../pages-for-subheaders/installation-requirements.md | 4 ++++ .../version-2.7/pages-for-subheaders/istio-setup-guide.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/istio.md | 4 ++++ .../version-2.7/pages-for-subheaders/k3s-hardening-guide.md | 4 ++++ .../pages-for-subheaders/kubernetes-cluster-setup.md | 4 ++++ .../kubernetes-clusters-in-rancher-setup.md | 4 ++++ .../version-2.7/pages-for-subheaders/kubernetes-components.md | 4 ++++ .../pages-for-subheaders/kubernetes-resources-setup.md | 4 ++++ .../pages-for-subheaders/launch-kubernetes-with-rancher.md | 4 ++++ .../load-balancer-and-ingress-controller.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/logging.md | 4 ++++ .../version-2.7/pages-for-subheaders/machine-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/manage-clusters.md | 4 ++++ .../pages-for-subheaders/manage-project-resource-quotas.md | 4 ++++ .../version-2.7/pages-for-subheaders/manage-projects.md | 4 ++++ .../manage-role-based-access-control-rbac.md | 4 ++++ .../pages-for-subheaders/monitoring-alerting-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-and-alerting.md | 4 ++++ .../monitoring-v2-configuration-guides.md | 4 ++++ .../pages-for-subheaders/monitoring-v2-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/new-user-guides.md | 4 ++++ .../pages-for-subheaders/node-template-configuration.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/nutanix.md | 4 ++++ .../pages-for-subheaders/other-installation-methods.md | 4 ++++ .../pages-for-subheaders/prometheus-federator-guides.md | 4 ++++ .../version-2.7/pages-for-subheaders/prometheus-federator.md | 4 ++++ .../pages-for-subheaders/provisioning-storage-examples.md | 4 ++++ .../version-2.7/pages-for-subheaders/quick-start-guides.md | 4 ++++ .../pages-for-subheaders/rancher-behind-an-http-proxy.md | 4 ++++ .../pages-for-subheaders/rancher-hardening-guides.md | 4 ++++ .../pages-for-subheaders/rancher-managed-clusters.md | 4 ++++ .../pages-for-subheaders/rancher-manager-architecture.md | 4 ++++ .../rancher-on-a-single-node-with-docker.md | 4 ++++ .../version-2.7/pages-for-subheaders/rancher-security.md | 4 ++++ .../pages-for-subheaders/rancher-server-configuration.md | 4 ++++ .../version-2.7/pages-for-subheaders/rancher-server.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/resources.md | 4 ++++ .../version-2.7/pages-for-subheaders/rke1-hardening-guide.md | 4 ++++ .../version-2.7/pages-for-subheaders/rke2-hardening-guide.md | 4 ++++ .../version-2.7/pages-for-subheaders/selinux-rpm.md | 4 ++++ .../pages-for-subheaders/set-up-cloud-providers.md | 4 ++++ .../set-up-clusters-from-hosted-kubernetes-providers.md | 4 ++++ .../pages-for-subheaders/single-node-rancher-in-docker.md | 4 ++++ .../version-2.7/pages-for-subheaders/use-existing-nodes.md | 4 ++++ .../use-new-nodes-in-an-infra-provider.md | 4 ++++ .../version-2.7/pages-for-subheaders/use-windows-clusters.md | 4 ++++ 
.../version-2.7/pages-for-subheaders/user-settings.md | 4 ++++ versioned_docs/version-2.7/pages-for-subheaders/vsphere.md | 4 ++++ .../version-2.7/pages-for-subheaders/workloads-and-pods.md | 4 ++++ 392 files changed, 1568 insertions(+) diff --git a/docs/pages-for-subheaders/about-provisioning-drivers.md b/docs/pages-for-subheaders/about-provisioning-drivers.md index 02ab3b0bc03f..812197b3b3f8 100644 --- a/docs/pages-for-subheaders/about-provisioning-drivers.md +++ b/docs/pages-for-subheaders/about-provisioning-drivers.md @@ -2,6 +2,10 @@ title: Provisioning Drivers --- + + + + Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers diff --git a/docs/pages-for-subheaders/about-rke1-templates.md b/docs/pages-for-subheaders/about-rke1-templates.md index 44e73fd794aa..601a622a5814 100644 --- a/docs/pages-for-subheaders/about-rke1-templates.md +++ b/docs/pages-for-subheaders/about-rke1-templates.md @@ -2,6 +2,10 @@ title: RKE Templates --- + + + + RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. diff --git a/docs/pages-for-subheaders/about-the-api.md b/docs/pages-for-subheaders/about-the-api.md index 4ac4ae8fcf6a..3b39d7c27172 100644 --- a/docs/pages-for-subheaders/about-the-api.md +++ b/docs/pages-for-subheaders/about-the-api.md @@ -2,6 +2,10 @@ title: API --- + + + + ## How to use the API The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: diff --git a/docs/pages-for-subheaders/access-clusters.md b/docs/pages-for-subheaders/access-clusters.md index 2dd82ffcb8a7..04edb86702c0 100644 --- a/docs/pages-for-subheaders/access-clusters.md +++ b/docs/pages-for-subheaders/access-clusters.md @@ -2,6 +2,10 @@ title: Cluster Access --- + + + + This section is about what tools can be used to access clusters managed by Rancher. For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/docs/pages-for-subheaders/advanced-configuration.md b/docs/pages-for-subheaders/advanced-configuration.md index 208abcf1ea87..87efa2a0f9e0 100644 --- a/docs/pages-for-subheaders/advanced-configuration.md +++ b/docs/pages-for-subheaders/advanced-configuration.md @@ -2,6 +2,10 @@ title: Advanced Configuration --- + + + + ### Alertmanager For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) diff --git a/docs/pages-for-subheaders/advanced-user-guides.md b/docs/pages-for-subheaders/advanced-user-guides.md index e4b8c16e1e4f..b5b3d2410372 100644 --- a/docs/pages-for-subheaders/advanced-user-guides.md +++ b/docs/pages-for-subheaders/advanced-user-guides.md @@ -2,6 +2,10 @@ title: Advanced User Guides --- + + + + Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. 
The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working. It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/docs/pages-for-subheaders/air-gapped-helm-cli-install.md b/docs/pages-for-subheaders/air-gapped-helm-cli-install.md index 058b697d93d6..d6fbc09698fd 100644 --- a/docs/pages-for-subheaders/air-gapped-helm-cli-install.md +++ b/docs/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -2,6 +2,10 @@ title: Air-Gapped Helm CLI Install --- + + + + This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. diff --git a/docs/pages-for-subheaders/authentication-config.md b/docs/pages-for-subheaders/authentication-config.md index ac2375cb697a..9bb89f46ee88 100644 --- a/docs/pages-for-subheaders/authentication-config.md +++ b/docs/pages-for-subheaders/authentication-config.md @@ -3,6 +3,10 @@ title: Authentication Config weight: 10 --- + + + + One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. diff --git a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md index b32d8068a916..2df94806af94 100644 --- a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -2,6 +2,10 @@ title: Authentication, Permissions and Global Configuration --- + + + + After installation, the [system administrator](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to set up authentication, authorization, security, default settings, security policies, drivers, and global DNS entries. 
## First Log In diff --git a/docs/pages-for-subheaders/aws-cloud-marketplace.md b/docs/pages-for-subheaders/aws-cloud-marketplace.md index 0b4c81853429..c487e1d6f928 100644 --- a/docs/pages-for-subheaders/aws-cloud-marketplace.md +++ b/docs/pages-for-subheaders/aws-cloud-marketplace.md @@ -2,6 +2,10 @@ title: AWS Marketplace Integration --- + + + + ## Overview Rancher offers an integration with the AWS Marketplace which allows users to purchase a support contract with SUSE. This integration allows you to easily adjust your support needs as you start to support more clusters. diff --git a/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md index ccb69108f66c..19feeb770d0f 100644 --- a/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md +++ b/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -3,6 +3,10 @@ title: Backups and Disaster Recovery keywords: [rancher backup restore, rancher backup and restore, backup restore rancher, rancher backup and restore rancher] --- + + + + In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. The `rancher-backup` operator is used to back up and restore Rancher on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps** page, or by using the Helm CLI. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) diff --git a/docs/pages-for-subheaders/backup-restore-configuration.md b/docs/pages-for-subheaders/backup-restore-configuration.md index c6574b936007..104584f741d5 100644 --- a/docs/pages-for-subheaders/backup-restore-configuration.md +++ b/docs/pages-for-subheaders/backup-restore-configuration.md @@ -2,6 +2,10 @@ title: Rancher Backup Configuration Reference --- + + + + - [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) - [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) - [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) diff --git a/docs/pages-for-subheaders/best-practices.md b/docs/pages-for-subheaders/best-practices.md index 81f14325f765..7009f6cce709 100644 --- a/docs/pages-for-subheaders/best-practices.md +++ b/docs/pages-for-subheaders/best-practices.md @@ -2,6 +2,10 @@ title: Best Practices Guide --- + + + + The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. 
diff --git a/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md b/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md index 64b0c8a37cfb..f5816af3c48b 100644 --- a/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md +++ b/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -2,6 +2,10 @@ title: Checklist for Production-Ready Clusters --- + + + + In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) diff --git a/docs/pages-for-subheaders/cis-scan-guides.md b/docs/pages-for-subheaders/cis-scan-guides.md index 6fef05e5b7cc..e76d47504e64 100644 --- a/docs/pages-for-subheaders/cis-scan-guides.md +++ b/docs/pages-for-subheaders/cis-scan-guides.md @@ -2,6 +2,10 @@ title: CIS Scan Guides --- + + + + - [Install rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md) - [Uninstall rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md) - [Run a Scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md) diff --git a/docs/pages-for-subheaders/cis-scans.md b/docs/pages-for-subheaders/cis-scans.md index ea59178857b5..d9c5dbecabb8 100644 --- a/docs/pages-for-subheaders/cis-scans.md +++ b/docs/pages-for-subheaders/cis-scans.md @@ -2,6 +2,10 @@ title: CIS Scans --- + + + + Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The CIS scans can run on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. diff --git a/docs/pages-for-subheaders/cli-with-rancher.md b/docs/pages-for-subheaders/cli-with-rancher.md index d4c33d7f390f..547d4c50308f 100644 --- a/docs/pages-for-subheaders/cli-with-rancher.md +++ b/docs/pages-for-subheaders/cli-with-rancher.md @@ -2,4 +2,8 @@ title: CLI with Rancher --- + + + + Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md). \ No newline at end of file diff --git a/docs/pages-for-subheaders/cloud-marketplace.md b/docs/pages-for-subheaders/cloud-marketplace.md index 41d499ef9dae..6f90a2c3ff42 100644 --- a/docs/pages-for-subheaders/cloud-marketplace.md +++ b/docs/pages-for-subheaders/cloud-marketplace.md @@ -2,6 +2,10 @@ title: Cloud Marketplace Integration --- + + + + Rancher offers integration with cloud marketplaces to easily purchase support for installations hosted on certain cloud providers. In addition, this integration also provides the ability to generate a supportconfig bundle which can be provided to rancher support. This integration only supports AWS. 
diff --git a/docs/pages-for-subheaders/cluster-configuration.md b/docs/pages-for-subheaders/cluster-configuration.md index 50f96d112586..60e02a8cd40b 100644 --- a/docs/pages-for-subheaders/cluster-configuration.md +++ b/docs/pages-for-subheaders/cluster-configuration.md @@ -2,6 +2,10 @@ title: Cluster Configuration --- + + + + After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. For information on editing cluster membership, go to [this page.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/docs/pages-for-subheaders/configuration-options.md b/docs/pages-for-subheaders/configuration-options.md index 15a0599ab000..92a375e948db 100644 --- a/docs/pages-for-subheaders/configuration-options.md +++ b/docs/pages-for-subheaders/configuration-options.md @@ -2,6 +2,10 @@ title: Configuration Options --- + + + + ### Egress Support By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). diff --git a/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index 73c2c651cd6a..8662bf782fb0 100644 --- a/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -2,6 +2,10 @@ title: Configuring Microsoft Active Directory Federation Service (SAML) --- + + + + If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. ## Prerequisites diff --git a/docs/pages-for-subheaders/configure-openldap.md b/docs/pages-for-subheaders/configure-openldap.md index be2aa86df697..9eb5fc7db2ac 100644 --- a/docs/pages-for-subheaders/configure-openldap.md +++ b/docs/pages-for-subheaders/configure-openldap.md @@ -2,6 +2,10 @@ title: Configuring OpenLDAP --- + + + + If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. ## Prerequisites diff --git a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md index 8952d8abafa9..cdb775108ca6 100644 --- a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,10 @@ title: "Kubernetes Persistent Storage: Volumes and Storage Classes" description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" --- + + + + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. 
For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/docs/pages-for-subheaders/custom-resource-configuration.md b/docs/pages-for-subheaders/custom-resource-configuration.md index cf43951e5b36..37c2f337d78c 100644 --- a/docs/pages-for-subheaders/custom-resource-configuration.md +++ b/docs/pages-for-subheaders/custom-resource-configuration.md @@ -2,6 +2,10 @@ title: Custom Resource Configuration --- + + + + The following Custom Resource Definitions are used to configure logging: - [Flow and ClusterFlow](../integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) diff --git a/docs/pages-for-subheaders/deploy-apps-across-clusters.md b/docs/pages-for-subheaders/deploy-apps-across-clusters.md index 3215a4f43b4d..8089f7a914dc 100644 --- a/docs/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/docs/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,10 @@ --- title: Deploying Applications across Clusters --- + + + + ### Fleet Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. diff --git a/docs/pages-for-subheaders/deploy-rancher-manager.md b/docs/pages-for-subheaders/deploy-rancher-manager.md index c91b6f37aa39..74e282f0832c 100644 --- a/docs/pages-for-subheaders/deploy-rancher-manager.md +++ b/docs/pages-for-subheaders/deploy-rancher-manager.md @@ -2,6 +2,10 @@ title: Deploying Rancher Server --- + + + + Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) diff --git a/docs/pages-for-subheaders/deploy-rancher-workloads.md b/docs/pages-for-subheaders/deploy-rancher-workloads.md index 3e86165a071c..b2898cd513b7 100644 --- a/docs/pages-for-subheaders/deploy-rancher-workloads.md +++ b/docs/pages-for-subheaders/deploy-rancher-workloads.md @@ -2,6 +2,10 @@ title: Deploying Workloads --- + + + + These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) diff --git a/docs/pages-for-subheaders/downstream-cluster-configuration.md b/docs/pages-for-subheaders/downstream-cluster-configuration.md index e9065f1fb088..b9fbad0b9665 100644 --- a/docs/pages-for-subheaders/downstream-cluster-configuration.md +++ b/docs/pages-for-subheaders/downstream-cluster-configuration.md @@ -2,4 +2,8 @@ title: Downstream Cluster Configuration --- + + + + The following docs will discuss [node template configuration](./node-template-configuration.md) and [machine configuration](./machine-configuration.md). \ No newline at end of file diff --git a/docs/pages-for-subheaders/enable-experimental-features.md b/docs/pages-for-subheaders/enable-experimental-features.md index 0494ed5f0fa8..c2fca17aa2f8 100644 --- a/docs/pages-for-subheaders/enable-experimental-features.md +++ b/docs/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,10 @@ --- title: Enabling Experimental Features --- + + + + Rancher includes some features that are experimental and disabled by default. 
You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweigh the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/docs/pages-for-subheaders/gke-cluster-configuration.md b/docs/pages-for-subheaders/gke-cluster-configuration.md index 0d7e0804c479..de9a1638a9c5 100644 --- a/docs/pages-for-subheaders/gke-cluster-configuration.md +++ b/docs/pages-for-subheaders/gke-cluster-configuration.md @@ -2,6 +2,10 @@ title: GKE Cluster Configuration Reference --- + + + + ## Changes in Rancher v2.6 - Support for additional configuration options: diff --git a/docs/pages-for-subheaders/helm-charts-in-rancher.md b/docs/pages-for-subheaders/helm-charts-in-rancher.md index d9a926566423..f0d7ba63fcae 100644 --- a/docs/pages-for-subheaders/helm-charts-in-rancher.md +++ b/docs/pages-for-subheaders/helm-charts-in-rancher.md @@ -2,6 +2,10 @@ title: Helm Charts in Rancher --- + + + + In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. Helm chart repositories are managed using **Apps**. It uses a catalog-like system to import bundles of charts from repositories and then uses those charts to either deploy custom Helm applications or Rancher's tools such as Monitoring or Istio. Rancher tools come as pre-loaded repositories which deploy as standalone Helm charts. Any additional repositories are only added to the current cluster. ### Versioning Scheme diff --git a/docs/pages-for-subheaders/horizontal-pod-autoscaler.md b/docs/pages-for-subheaders/horizontal-pod-autoscaler.md index 64d553553154..d18ce147d060 100644 --- a/docs/pages-for-subheaders/horizontal-pod-autoscaler.md +++ b/docs/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -3,6 +3,10 @@ title: The Horizontal Pod Autoscaler description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment --- + + + + The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. diff --git a/docs/pages-for-subheaders/infrastructure-setup.md b/docs/pages-for-subheaders/infrastructure-setup.md index f7b834992aae..fabdc72e9751 100644 --- a/docs/pages-for-subheaders/infrastructure-setup.md +++ b/docs/pages-for-subheaders/infrastructure-setup.md @@ -2,6 +2,10 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. 
--- + + + + To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) diff --git a/docs/pages-for-subheaders/install-cluster-autoscaler.md b/docs/pages-for-subheaders/install-cluster-autoscaler.md index 740348651316..8b749aae5ee0 100644 --- a/docs/pages-for-subheaders/install-cluster-autoscaler.md +++ b/docs/pages-for-subheaders/install-cluster-autoscaler.md @@ -2,6 +2,10 @@ title: Cluster Autoscaler --- + + + + In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: diff --git a/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index 5708b68b23c0..4eda613d25f2 100644 --- a/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md +++ b/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -3,6 +3,10 @@ title: Install/Upgrade Rancher on a Kubernetes Cluster description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. diff --git a/docs/pages-for-subheaders/installation-and-upgrade.md b/docs/pages-for-subheaders/installation-and-upgrade.md index 032c76e24d52..3077b14edc28 100644 --- a/docs/pages-for-subheaders/installation-and-upgrade.md +++ b/docs/pages-for-subheaders/installation-and-upgrade.md @@ -3,6 +3,10 @@ title: Installing/Upgrading Rancher description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. ## Terminology diff --git a/docs/pages-for-subheaders/installation-references.md b/docs/pages-for-subheaders/installation-references.md index e2048279af00..6108728b04fa 100644 --- a/docs/pages-for-subheaders/installation-references.md +++ b/docs/pages-for-subheaders/installation-references.md @@ -2,4 +2,8 @@ title: Installation References --- + + + + Please see the following reference guides for other installation resources: [Rancher Helm chart options](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md), [TLS settings](../getting-started/installation-and-upgrade/installation-references/tls-settings.md), and [feature flags](../getting-started/installation-and-upgrade/installation-references/feature-flags.md). 
\ No newline at end of file diff --git a/docs/pages-for-subheaders/installation-requirements.md b/docs/pages-for-subheaders/installation-requirements.md index 8431f164d4b3..b7214336b133 100644 --- a/docs/pages-for-subheaders/installation-requirements.md +++ b/docs/pages-for-subheaders/installation-requirements.md @@ -3,6 +3,10 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup --- + + + + This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. :::note Important: diff --git a/docs/pages-for-subheaders/istio-setup-guide.md b/docs/pages-for-subheaders/istio-setup-guide.md index 2c6ba0c41189..24475f7ffea7 100644 --- a/docs/pages-for-subheaders/istio-setup-guide.md +++ b/docs/pages-for-subheaders/istio-setup-guide.md @@ -2,6 +2,10 @@ title: Setup Guide --- + + + + This section describes how to enable Istio and start using it in your projects. If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. diff --git a/docs/pages-for-subheaders/istio.md b/docs/pages-for-subheaders/istio.md index 8b00fd018cb9..93f1e9b7cc50 100644 --- a/docs/pages-for-subheaders/istio.md +++ b/docs/pages-for-subheaders/istio.md @@ -2,6 +2,10 @@ title: Istio --- + + + + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index fc356dfda175..1fdcce7964bf 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -2,6 +2,10 @@ title: K3s Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden a K3s cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Internet Security (CIS) Kubernetes benchmark controls. :::note diff --git a/docs/pages-for-subheaders/kubernetes-cluster-setup.md b/docs/pages-for-subheaders/kubernetes-cluster-setup.md index 8f9a88e0da3d..9291d55bee45 100644 --- a/docs/pages-for-subheaders/kubernetes-cluster-setup.md +++ b/docs/pages-for-subheaders/kubernetes-cluster-setup.md @@ -2,6 +2,10 @@ title: "Don't have a Kubernetes cluster? Try one of these tutorials." --- + + + + This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. Rancher can run on any Kubernetes cluster. 
diff --git a/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index b7f2f0a3bb0b..899d4bb59375 100644 --- a/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md +++ b/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -3,6 +3,10 @@ title: Setting up Kubernetes Clusters in Rancher description: Provisioning Kubernetes Clusters --- + + + + Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. diff --git a/docs/pages-for-subheaders/kubernetes-components.md b/docs/pages-for-subheaders/kubernetes-components.md index 5e482f2a1e85..f048b5ba19bf 100644 --- a/docs/pages-for-subheaders/kubernetes-components.md +++ b/docs/pages-for-subheaders/kubernetes-components.md @@ -2,6 +2,10 @@ title: Kubernetes Components --- + + + + The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. This section includes troubleshooting tips in the following categories: diff --git a/docs/pages-for-subheaders/kubernetes-resources-setup.md b/docs/pages-for-subheaders/kubernetes-resources-setup.md index ec485c8f80e9..865f5ae5c462 100644 --- a/docs/pages-for-subheaders/kubernetes-resources-setup.md +++ b/docs/pages-for-subheaders/kubernetes-resources-setup.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + You can view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. ## Workloads diff --git a/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md b/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md index fba8d3aeb668..5b7f4363bab6 100644 --- a/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md +++ b/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -2,6 +2,10 @@ title: Launching Kubernetes with Rancher --- + + + + You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, you can choose between [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) or [RKE2](https://docs.rke2.io) distributions. Rancher can launch Kubernetes on any computers, including: - Bare-metal servers diff --git a/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md b/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md index ea5d34fed342..41bdf40a3234 100644 --- a/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md +++ b/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -3,6 +3,10 @@ title: Set Up Load Balancer and Ingress Controller within Rancher description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers --- + + + + Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. 
## Load Balancers diff --git a/docs/pages-for-subheaders/logging.md b/docs/pages-for-subheaders/logging.md index a7698cbf32cc..427422627f31 100644 --- a/docs/pages-for-subheaders/logging.md +++ b/docs/pages-for-subheaders/logging.md @@ -3,6 +3,10 @@ title: Rancher Integration with Logging Services description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. --- + + + + The [Logging operator](https://kube-logging.github.io/docs/) now powers Rancher's logging solution in place of the former, in-house solution. ## Enabling Logging diff --git a/docs/pages-for-subheaders/machine-configuration.md b/docs/pages-for-subheaders/machine-configuration.md index 18ec97e5f193..e1b9bb72f0a4 100644 --- a/docs/pages-for-subheaders/machine-configuration.md +++ b/docs/pages-for-subheaders/machine-configuration.md @@ -2,4 +2,8 @@ title: Machine Configuration --- + + + + Machine configuration is the arrangement of resources assigned to a virtual machine. Please see the docs for [Amazon EC2](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md), [DigitalOcean](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md), and [Azure](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md) to learn more. \ No newline at end of file diff --git a/docs/pages-for-subheaders/manage-clusters.md b/docs/pages-for-subheaders/manage-clusters.md index c3f8c1fd6ff3..e6f698853388 100644 --- a/docs/pages-for-subheaders/manage-clusters.md +++ b/docs/pages-for-subheaders/manage-clusters.md @@ -2,6 +2,10 @@ title: Cluster Administration --- + + + + After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. :::note diff --git a/docs/pages-for-subheaders/manage-project-resource-quotas.md b/docs/pages-for-subheaders/manage-project-resource-quotas.md index f6023c0f6089..fa6f10ec84c8 100644 --- a/docs/pages-for-subheaders/manage-project-resource-quotas.md +++ b/docs/pages-for-subheaders/manage-project-resource-quotas.md @@ -2,6 +2,10 @@ title: Project Resource Quotas --- + + + + In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. This page is a how-to guide for creating resource quotas in existing projects. diff --git a/docs/pages-for-subheaders/manage-projects.md b/docs/pages-for-subheaders/manage-projects.md index 2d40492ba3ca..be308c7e3423 100644 --- a/docs/pages-for-subheaders/manage-projects.md +++ b/docs/pages-for-subheaders/manage-projects.md @@ -2,6 +2,10 @@ title: Project Administration --- + + + + _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. 
In terms of hierarchy: diff --git a/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md b/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md index b2674b753b9e..c6c87eff5fc9 100644 --- a/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md +++ b/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -2,6 +2,10 @@ title: Role-Based Access Control (RBAC) --- + + + + Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](authentication-config.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page change. diff --git a/docs/pages-for-subheaders/monitoring-alerting-guides.md b/docs/pages-for-subheaders/monitoring-alerting-guides.md index 720151ea38c7..97e3e801b263 100644 --- a/docs/pages-for-subheaders/monitoring-alerting-guides.md +++ b/docs/pages-for-subheaders/monitoring-alerting-guides.md @@ -2,6 +2,10 @@ title: Monitoring Guides --- + + + + - [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) - [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) - [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) diff --git a/docs/pages-for-subheaders/monitoring-and-alerting.md b/docs/pages-for-subheaders/monitoring-and-alerting.md index a398b725ede8..773796fc0959 100644 --- a/docs/pages-for-subheaders/monitoring-and-alerting.md +++ b/docs/pages-for-subheaders/monitoring-and-alerting.md @@ -3,6 +3,10 @@ title: Monitoring and Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring --- + + + + Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md b/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md index de3a78168233..bd0de341f465 100644 --- a/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md +++ b/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -2,6 +2,10 @@ title: Configuration --- + + + + This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration.
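As a hedged illustration of the "custom scrape targets" mentioned above: with the Prometheus Operator that backs Monitoring V2, a scrape target is typically declared with a `ServiceMonitor` custom resource. The namespace, labels, and port name below are assumptions for the sketch, not values taken from this patch:

```bash
# Sketch: declare a custom scrape target for a service labeled app=example-app
# that exposes a port named "metrics". Adjust namespaces and labels to your workload.
kubectl apply -f - <<'EOF'
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: cattle-monitoring-system
spec:
  namespaceSelector:
    matchNames:
      - default
  selector:
    matchLabels:
      app: example-app
  endpoints:
    - port: metrics
      interval: 30s
EOF
```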
diff --git a/docs/pages-for-subheaders/monitoring-v2-configuration.md b/docs/pages-for-subheaders/monitoring-v2-configuration.md index abf493b4487a..79f97d9513d8 100644 --- a/docs/pages-for-subheaders/monitoring-v2-configuration.md +++ b/docs/pages-for-subheaders/monitoring-v2-configuration.md @@ -2,6 +2,10 @@ title: Monitoring V2 Configuration --- + + + + The following sections will explain important options essential to configuring Monitoring V2 in Rancher: - [Receiver Configuration](../reference-guides/monitoring-v2-configuration/receivers.md) diff --git a/docs/pages-for-subheaders/new-user-guides.md b/docs/pages-for-subheaders/new-user-guides.md index e2e667395c3b..e2e68ff0089a 100644 --- a/docs/pages-for-subheaders/new-user-guides.md +++ b/docs/pages-for-subheaders/new-user-guides.md @@ -2,6 +2,10 @@ title: New User Guides --- + + + + New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users to do work to then get the promised results immediately. diff --git a/docs/pages-for-subheaders/node-template-configuration.md b/docs/pages-for-subheaders/node-template-configuration.md index 0fff7eb9835b..e6c22d5e8526 100644 --- a/docs/pages-for-subheaders/node-template-configuration.md +++ b/docs/pages-for-subheaders/node-template-configuration.md @@ -2,4 +2,8 @@ title: Node Template Configuration --- + + + + To learn about node template config, refer to [EC2 Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md), [DigitalOcean Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md), [Azure Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md), [vSphere Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md), and [Nutanix Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md). diff --git a/docs/pages-for-subheaders/nutanix.md b/docs/pages-for-subheaders/nutanix.md index 706ecbae214b..b626cf2bd12b 100644 --- a/docs/pages-for-subheaders/nutanix.md +++ b/docs/pages-for-subheaders/nutanix.md @@ -3,6 +3,10 @@ title: Creating a Nutanix AOS Cluster description: Use Rancher to create a Nutanix AOS (AHV) cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + [Nutanix Acropolis Operating System](https://www.nutanix.com/products/acropolis) (Nutanix AOS) is an operating system for the Nutanix hyper-converged infrastructure platform. AOS comes with a built-in hypervisor called [Acropolis Hypervisor](https://www.nutanix.com/products/ahv), or AHV. By using Rancher with Nutanix AOS (AHV), you can bring cloud operations on-premises. Rancher can provision nodes in AOS (AHV) and install Kubernetes on them. 
When creating a Kubernetes cluster in AOS, Rancher first provisions the specified number of virtual machines by communicating with the Prism Central API. Then it installs Kubernetes on top of the VMs. diff --git a/docs/pages-for-subheaders/other-installation-methods.md b/docs/pages-for-subheaders/other-installation-methods.md index effd2424c191..7cd497a8d489 100644 --- a/docs/pages-for-subheaders/other-installation-methods.md +++ b/docs/pages-for-subheaders/other-installation-methods.md @@ -2,6 +2,10 @@ title: Other Installation Methods --- + + + + ### Air Gapped Installations Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. diff --git a/docs/pages-for-subheaders/prometheus-federator-guides.md b/docs/pages-for-subheaders/prometheus-federator-guides.md index 0f82f71e1ada..2d1c0ae82246 100644 --- a/docs/pages-for-subheaders/prometheus-federator-guides.md +++ b/docs/pages-for-subheaders/prometheus-federator-guides.md @@ -2,6 +2,10 @@ title: Prometheus Federator Guides --- + + + + - [Enable Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md) - [Uninstall Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md) - [Customize Grafana Dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md) diff --git a/docs/pages-for-subheaders/prometheus-federator.md b/docs/pages-for-subheaders/prometheus-federator.md index 918d0b0abea9..efef5f5abae8 100644 --- a/docs/pages-for-subheaders/prometheus-federator.md +++ b/docs/pages-for-subheaders/prometheus-federator.md @@ -2,6 +2,10 @@ title: Prometheus Federator --- + + + + Prometheus Federator, also referred to as Project Monitoring v2, deploys a Helm Project Operator (based on the [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts each containing a Project Monitoring Stack, where each stack contains: - [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) diff --git a/docs/pages-for-subheaders/provisioning-storage-examples.md b/docs/pages-for-subheaders/provisioning-storage-examples.md index 7dcd3960ae51..44fd9593fba5 100644 --- a/docs/pages-for-subheaders/provisioning-storage-examples.md +++ b/docs/pages-for-subheaders/provisioning-storage-examples.md @@ -2,6 +2,10 @@ title: Provisioning Storage Examples --- + + + + Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: diff --git a/docs/pages-for-subheaders/quick-start-guides.md b/docs/pages-for-subheaders/quick-start-guides.md index c0010d438507..8a7f8028dfbc 100644 --- a/docs/pages-for-subheaders/quick-start-guides.md +++ b/docs/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,10 @@ --- title: Rancher Deployment Quick Start Guides --- + + + + :::caution The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher.
These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). diff --git a/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md b/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md index 59a7eec2dba7..fd8a41b8e086 100644 --- a/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md +++ b/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -2,6 +2,10 @@ title: Installing Rancher behind an HTTP Proxy --- + + + + In many enterprise environments, servers or VMs running on-premises do not have direct Internet access, but must connect to external services through an HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). diff --git a/docs/pages-for-subheaders/rancher-hardening-guides.md b/docs/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..0648de359b50 100644 --- a/docs/pages-for-subheaders/rancher-hardening-guides.md +++ b/docs/pages-for-subheaders/rancher-hardening-guides.md @@ -2,6 +2,10 @@ title: Self-Assessment and Hardening Guides for Rancher --- + + + + Rancher provides specific security hardening guides for each supported Rancher version's Kubernetes distributions. ## Rancher Kubernetes Distributions diff --git a/docs/pages-for-subheaders/rancher-managed-clusters.md b/docs/pages-for-subheaders/rancher-managed-clusters.md index 2fc25c09150c..2cdb03fd9094 100644 --- a/docs/pages-for-subheaders/rancher-managed-clusters.md +++ b/docs/pages-for-subheaders/rancher-managed-clusters.md @@ -2,6 +2,10 @@ title: Best Practices for Rancher Managed Clusters --- + + + + ### Logging Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging. diff --git a/docs/pages-for-subheaders/rancher-manager-architecture.md b/docs/pages-for-subheaders/rancher-manager-architecture.md index 03ec604e3f66..d7e76f285739 100644 --- a/docs/pages-for-subheaders/rancher-manager-architecture.md +++ b/docs/pages-for-subheaders/rancher-manager-architecture.md @@ -2,6 +2,10 @@ title: Architecture --- + + + + This section focuses on the [Rancher server and its components](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) and how [Rancher communicates with downstream Kubernetes clusters](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md). For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) diff --git a/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index 339dfc856a0a..da5b39209cfe 100644 --- a/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md +++ b/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -3,6 +3,10 @@ title: Installing Rancher on a Single Node Using Docker description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container.
--- + + + + Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. diff --git a/docs/pages-for-subheaders/rancher-security.md b/docs/pages-for-subheaders/rancher-security.md index 4ad6516f91ad..5a30b09c77be 100644 --- a/docs/pages-for-subheaders/rancher-security.md +++ b/docs/pages-for-subheaders/rancher-security.md @@ -2,6 +2,10 @@ title: Security --- + + + +
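To make the single-node Docker scenario described above concrete, here is a hedged sketch of the kind of `docker run` invocation the linked install page walks through; the image tag and published ports are illustrative, and the exact flags (including the `--privileged` requirement) should be taken from that page:

```bash
# Sketch: run the Rancher server as a single Docker container on one Linux host.
# For development and testing only; production installs run on a Kubernetes cluster.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest
```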
diff --git a/docs/pages-for-subheaders/rancher-server-configuration.md b/docs/pages-for-subheaders/rancher-server-configuration.md index 0892fb94db35..5e18f69e7406 100644 --- a/docs/pages-for-subheaders/rancher-server-configuration.md +++ b/docs/pages-for-subheaders/rancher-server-configuration.md @@ -2,6 +2,10 @@ title: Rancher Server Configuration --- + + + + - [RKE1 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [RKE2 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) - [K3s Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md) diff --git a/docs/pages-for-subheaders/rancher-server.md b/docs/pages-for-subheaders/rancher-server.md index c8984d9c447b..45c3917cd588 100644 --- a/docs/pages-for-subheaders/rancher-server.md +++ b/docs/pages-for-subheaders/rancher-server.md @@ -2,6 +2,10 @@ title: Best Practices for the Rancher Server --- + + + + This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. ### Recommended Architecture and Infrastructure diff --git a/docs/pages-for-subheaders/resources.md b/docs/pages-for-subheaders/resources.md index 57f6ee1e7b62..52e61353441b 100644 --- a/docs/pages-for-subheaders/resources.md +++ b/docs/pages-for-subheaders/resources.md @@ -2,6 +2,10 @@ title: Resources --- + + + + ### Docker Installations The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. diff --git a/docs/pages-for-subheaders/rke1-hardening-guide.md b/docs/pages-for-subheaders/rke1-hardening-guide.md index 6a187df32738..5c5fad27a162 100644 --- a/docs/pages-for-subheaders/rke1-hardening-guide.md +++ b/docs/pages-for-subheaders/rke1-hardening-guide.md @@ -2,6 +2,10 @@ title: RKE Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden an RKE cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Information Security (CIS) Kubernetes benchmark controls. :::note diff --git a/docs/pages-for-subheaders/rke2-hardening-guide.md b/docs/pages-for-subheaders/rke2-hardening-guide.md index 962462c7f3fd..5f3c8c7697f4 100644 --- a/docs/pages-for-subheaders/rke2-hardening-guide.md +++ b/docs/pages-for-subheaders/rke2-hardening-guide.md @@ -2,6 +2,10 @@ title: RKE2 Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden an RKE2 cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Information Security (CIS) Kubernetes benchmark controls. :::note diff --git a/docs/pages-for-subheaders/selinux-rpm.md b/docs/pages-for-subheaders/selinux-rpm.md index 0b06703cf32d..c72c72ff56a7 100644 --- a/docs/pages-for-subheaders/selinux-rpm.md +++ b/docs/pages-for-subheaders/selinux-rpm.md @@ -2,6 +2,10 @@ title: SELinux RPM --- + + + + [Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. 
Developed by Red Hat, it is an implementation of mandatory access controls (MAC) on Linux. Mandatory access controls allow an administrator of a system to define how applications and users can access different resources such as files, devices, networks and inter-process communication. SELinux also enhances security by making an OS restrictive by default. diff --git a/docs/pages-for-subheaders/set-up-cloud-providers.md b/docs/pages-for-subheaders/set-up-cloud-providers.md index 4bf3356c2671..9a02515dba97 100644 --- a/docs/pages-for-subheaders/set-up-cloud-providers.md +++ b/docs/pages-for-subheaders/set-up-cloud-providers.md @@ -2,6 +2,10 @@ title: Setting up Cloud Providers --- + + + + A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. diff --git a/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md index 001533601639..fd4f4ed5bda7 100644 --- a/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +++ b/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -2,6 +2,10 @@ title: Setting up Clusters from Hosted Kubernetes Providers --- + + + + In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. diff --git a/docs/pages-for-subheaders/single-node-rancher-in-docker.md b/docs/pages-for-subheaders/single-node-rancher-in-docker.md index 61cd166a90c2..91072d2b3b44 100644 --- a/docs/pages-for-subheaders/single-node-rancher-in-docker.md +++ b/docs/pages-for-subheaders/single-node-rancher-in-docker.md @@ -2,4 +2,8 @@ title: Single Node Rancher in Docker --- + + + + The following docs will discuss [HTTP proxy configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) and [advanced options](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for Docker installs. \ No newline at end of file diff --git a/docs/pages-for-subheaders/use-existing-nodes.md b/docs/pages-for-subheaders/use-existing-nodes.md index d0c301bdc32e..2aeb05bc488e 100644 --- a/docs/pages-for-subheaders/use-existing-nodes.md +++ b/docs/pages-for-subheaders/use-existing-nodes.md @@ -3,6 +3,10 @@ title: Launching Kubernetes on Existing Custom Nodes description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements --- + + + + When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. To use this option you'll need access to servers you intend to use in your Kubernetes cluster. 
Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which include some hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. diff --git a/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index fb11e6f46c7c..d875e2325c2f 100644 --- a/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +++ b/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider --- + + + + When you create an RKE or RKE2 cluster using a node template in Rancher, each resulting node pool is shown in a new **Machine Pools** tab. You can see the machine pools by doing the following: 1. Click **☰ > Cluster Management**. diff --git a/docs/pages-for-subheaders/use-windows-clusters.md b/docs/pages-for-subheaders/use-windows-clusters.md index d60a24435b17..36fe47c422f9 100644 --- a/docs/pages-for-subheaders/use-windows-clusters.md +++ b/docs/pages-for-subheaders/use-windows-clusters.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on Windows Clusters --- + + + + When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. diff --git a/docs/pages-for-subheaders/user-settings.md b/docs/pages-for-subheaders/user-settings.md index db2376f3df89..a9ed1c72d923 100644 --- a/docs/pages-for-subheaders/user-settings.md +++ b/docs/pages-for-subheaders/user-settings.md @@ -2,6 +2,10 @@ title: User Settings --- + + + + Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. ![User Settings Menu](/img/user-settings.png) diff --git a/docs/pages-for-subheaders/vsphere.md b/docs/pages-for-subheaders/vsphere.md index 8d5cac9d7b89..70eea8bfd52c 100644 --- a/docs/pages-for-subheaders/vsphere.md +++ b/docs/pages-for-subheaders/vsphere.md @@ -2,6 +2,10 @@ title: Creating a vSphere Cluster description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises.
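Relating to the custom-node registration step described in `use-existing-nodes.md` above: the command that the Rancher UI generates for each server generally has the shape of the following hedged sketch. Every value here (agent version tag, server URL, token, checksum, and the chosen role flags) is a placeholder that the UI fills in for your installation:

```bash
# Illustrative only — copy the real command from the Rancher UI's cluster
# registration page; it embeds your server URL, token, and CA checksum.
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:v2.7.0 \
  --server https://<RANCHER_SERVER_URL> \
  --token <REGISTRATION_TOKEN> \
  --ca-checksum <CA_CHECKSUM> \
  --etcd --controlplane --worker
```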
diff --git a/docs/pages-for-subheaders/workloads-and-pods.md b/docs/pages-for-subheaders/workloads-and-pods.md index 94ac0881d6d6..5cfe84668af6 100644 --- a/docs/pages-for-subheaders/workloads-and-pods.md +++ b/docs/pages-for-subheaders/workloads-and-pods.md @@ -3,6 +3,10 @@ title: "Kubernetes Workloads and Pods" description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" --- + + + + You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. ### Pods diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md index 9bee75369b2b..7750174d04b3 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md @@ -2,6 +2,10 @@ title: Provisioning Drivers --- + + + + Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md index 5b733409f86c..5cbb3fecd69a 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md @@ -2,6 +2,10 @@ title: RKE Templates --- + + + + _Available as of Rancher v2.3.0_ RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md index 3d952998f1e8..ae5cd82377fa 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md @@ -2,6 +2,10 @@ title: API --- + + + + ## How to use the API The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys](../reference-guides/user-settings/api-keys.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md index 9bc920a34076..5a4deeffeecf 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md @@ -2,6 +2,10 @@ title: Cluster Access --- + + + + This section is about what tools can be used to access clusters managed by Rancher. 
For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md index e4b8c16e1e4f..b5b3d2410372 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md @@ -2,6 +2,10 @@ title: Advanced User Guides --- + + + + Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task that they have encountered while working. It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md index b2ea746630aa..0ce60644e20d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -2,6 +2,10 @@ title: Air Gapped Helm CLI Install --- + + + + This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container.
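As a hedged sketch of the first step the air-gapped Helm CLI install describes — fetching the Rancher chart from a workstation that does have internet access so it can be carried into the air gap — the repository URL below is the public Rancher stable chart repo, and the commands assume a recent Helm client; container images must be mirrored to a private registry separately:

```bash
# On a machine with internet access: download the Rancher chart for transfer
# into the air-gapped environment.
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
helm fetch rancher-stable/rancher
```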
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md index dfa237a21039..3d6a59f954dd 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md @@ -2,4 +2,8 @@ title: Authentication Config --- + + + + In the following tutorials, you will learn how to [manage users and groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md), [create local users](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md), [configure Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md), [configure Active Directory (AD)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md), [configure OpenLDAP](../pages-for-subheaders/configure-openldap.md), [configure FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md), [configure Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md), [configure GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md), [configure Keycloak](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md), [configure PingIdentity (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md), [configure Okta (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md), [configure Shibboleth (SAML)](../pages-for-subheaders/configure-shibboleth-saml.md), and how to [configure Microsoft AD Federation Service (SAML)](../pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md). 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md index af2d35bfb53a..799c12045bd7 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -2,6 +2,10 @@ title: Authentication, Permissions and Global Configuration --- + + + + After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. ## First Log In diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md index 1803096157c1..074546ecfbd7 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -3,6 +3,10 @@ title: Backups and Disaster Recovery keywords: [rancher v2.0-v2.4 backup restore, rancher v2.0-v2.4 backup and restore, backup restore rancher v2.0-v2.4, backup and restore rancher v2.0-v2.4] --- + + + + This section is devoted to protecting your data in a disaster scenario. To protect yourself from a disaster scenario, you should create backups on a regular basis. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md index 4a3fa51d285a..845735f098ce 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md @@ -2,6 +2,10 @@ title: Best Practices Guide --- + + + + The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md index b5a268b7c70a..8cc0a6948aa6 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -2,6 +2,10 @@ title: Checklist for Production-Ready Clusters --- + + + + In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. 
For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md index 4683ee3cdabc..50c7fd2bab2c 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md @@ -2,6 +2,10 @@ title: CIS Scan Guides --- + + + + - [Run a Scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md) - [Run a Scan Periodically on a Schedule](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md) - [Skip Tests](../how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md index 84402b5c3c19..efafaaa13e2f 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md @@ -2,6 +2,10 @@ title: CIS Scans --- + + + + _Available as of v2.4.0_ - [Prerequisites](#prerequisites) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md index d4c33d7f390f..547d4c50308f 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md @@ -2,4 +2,8 @@ title: CLI with Rancher --- + + + + Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md). \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md index e6838f059cc9..a966e8e6d504 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md @@ -2,6 +2,10 @@ title: Cluster Configuration --- + + + + After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. 
For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index d645337d23ec..a93e37aff4c8 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -1,6 +1,10 @@ --- title: Configuring Microsoft Active Directory Federation Service (SAML) --- + + + + _Available as of v2.0.7_ If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md index 3607bc9d7bfa..aa300d61d7d1 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md @@ -2,6 +2,10 @@ title: Configuring OpenLDAP --- + + + + _Available as of v2.0.5_ If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md index 528aeec4f041..c2adf28aefc7 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -3,6 +3,10 @@ title: "Kubernetes Persistent Storage: Volumes and Storage Classes" description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" --- + + + + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. 
For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md index 051799f2b834..51162752eaa1 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md @@ -2,6 +2,10 @@ title: Deploying Rancher Server --- + + + + Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md index 3e86165a071c..b2898cd513b7 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md @@ -2,6 +2,10 @@ title: Deploying Workloads --- + + + + These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md index 126f0472164a..8aec527bb92b 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md @@ -2,4 +2,8 @@ title: Downstream Cluster Configuration --- + + + + The following docs will discuss [node template configuration](./node-template-configuration.md). \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md index 8829137887d4..a773cf5b9357 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md @@ -2,6 +2,10 @@ title: Enabling Experimental Features --- + + + + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) outweigh the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default.
The features can be enabled in three ways: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md index b41b54a2117e..9ca0becfeb2d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md @@ -3,6 +3,10 @@ title: Helm Charts in Rancher description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. --- + + + + Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. - **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md index 23ff2df8e602..717568ac5cbd 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -3,6 +3,10 @@ title: The Horizontal Pod Autoscaler description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment --- + + + + The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md index f7b834992aae..fabdc72e9751 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md @@ -2,6 +2,10 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. --- + + + + To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md index cd0fac015c70..545d5a98b828 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md @@ -2,6 +2,10 @@ title: Cluster Autoscaler --- + + + + In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. 
The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index 8a0e52a71911..73353f9effe3 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -3,6 +3,10 @@ title: Install Rancher on a Kubernetes Cluster description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + ## Prerequisite Set up the Rancher server's local Kubernetes cluster. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md index 9f19859700ef..4963bbfdcbda 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md @@ -3,6 +3,10 @@ title: Installing/Upgrading Rancher description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. ## Terminology diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md index 0eeae0f6c4eb..625a8721e27d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md @@ -2,4 +2,8 @@ title: Installation References --- + + + + Please see the following reference guides for other installation resources: [Rancher Helm chart options](../reference-guides/installation-references/helm-chart-options.md), [TLS settings](../reference-guides/installation-references/tls-settings.md), and [feature flags](../reference-guides/installation-references/feature-flags.md). \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md index f2324ce9e317..491a28a5e343 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md @@ -3,6 +3,10 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup --- + + + + This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. 
> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) which will run your apps and services. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md index 9d161f714544..7340ff4fceb4 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md @@ -2,6 +2,10 @@ title: Setup Guide --- + + + + This section describes how to enable Istio and start using it in your projects. This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md index 3fa5f5fce9e6..fea19f422b91 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md @@ -2,6 +2,10 @@ title: Istio --- + + + + _Available as of v2.3.0_ [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md index e8074910cee3..2d724a0005c1 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md @@ -2,6 +2,10 @@ title: "Don't have a Kubernetes cluster? Try one of these tutorials." --- + + + + This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. In Rancher before v2.4, the Rancher server needed to run on an RKE Kubernetes cluster. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index b40ea279368c..4733a7278f86 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -3,6 +3,10 @@ title: Setting up Kubernetes Clusters in Rancher description: Provisioning Kubernetes Clusters --- + + + + Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. 
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md index b2271a509093..db3dfc08ca00 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md @@ -2,6 +2,10 @@ title: Kubernetes Components --- + + + + The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. This section includes troubleshooting tips in the following categories: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md index ff7f1cdccbea..2734c69d7998 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + ## Workloads Deploy applications to your cluster nodes using [workloads](workloads-and-pods.md), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md index f198523e3b29..bbff4a461d5a 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -2,6 +2,10 @@ title: Launching Kubernetes with Rancher --- + + + + You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - Bare-metal servers diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md index f5c26398be5a..053f18e6ba43 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -3,6 +3,10 @@ title: Set Up Load Balancer and Ingress Controller within Rancher description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers --- + + + + Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. 
## Load Balancers diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md index 9761637dc32c..7cee5c80ceef 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md @@ -2,6 +2,10 @@ title: Cluster Administration --- + + + + After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. This page covers the following topics: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md index 203cc9086ac5..c9f6a0ea1e21 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md @@ -2,6 +2,10 @@ title: Project Resource Quotas --- + + + + _Available as of v2.1.0_ In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md index 69ae566f72a2..71e00c53807d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md @@ -2,6 +2,10 @@ title: Project Administration --- + + + + _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. In terms of hierarchy: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md index 8d63e7ad740a..921cb745cc20 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -2,6 +2,10 @@ title: Role-Based Access Control (RBAC) --- + + + + Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page changes. 
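Rancher's roles are its own abstraction, but access to downstream clusters is ultimately enforced with standard Kubernetes RBAC objects. For comparison only, a hypothetical namespace-scoped binding in plain Kubernetes might look like this (Rancher generates its own bindings; the names here are illustrative):

```bash
cat <<'EOF' | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: dev-team-view            # illustrative name
  namespace: demo                # illustrative namespace
subjects:
  - kind: User
    name: jane@example.com       # illustrative external user
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: view                     # Kubernetes built-in read-only role
  apiGroup: rbac.authorization.k8s.io
EOF
```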
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md index e2e667395c3b..e2e68ff0089a 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md @@ -2,6 +2,10 @@ title: New User Guides --- + + + + New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users to do work to then get the promised results immediately. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md index 9e34bbb0681a..618453c0fb9d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md @@ -2,4 +2,8 @@ title: Node Template Configuration --- + + + + To learn about node template config, refer to [EC2 Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md), [DigitalOcean Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md), [Azure Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md), and [vSphere Node Template Configuration](../pages-for-subheaders/creating-a-vsphere-cluster.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md index 6b0ad0cf9a91..fcb35cf20595 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md @@ -2,6 +2,10 @@ title: Other Installation Methods --- + + + + ### Air Gapped Installations Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md index 72d452efcc15..ea726b07c3ca 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md @@ -2,6 +2,10 @@ title: Provisioning Storage Examples --- + + + + Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. 
For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md index 7d0b30134d2d..3de52c39b8bc 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md @@ -2,6 +2,10 @@ title: Rancher Deployment Quick Start Guides --- + + + + >**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md index 59a7eec2dba7..fd8a41b8e086 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -2,6 +2,10 @@ title: Installing Rancher behind an HTTP Proxy --- + + + + In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md index db2e5229a159..eac9370372a5 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md @@ -2,6 +2,10 @@ title: Architecture --- + + + + This section focuses on the [Rancher server and its components](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) and how [Rancher communicates with downstream Kubernetes clusters](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md). For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index d8a7d3c2b12f..343aab32c141 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -3,6 +3,10 @@ title: Installing Rancher on a Single Node Using Docker description: For development and testing environments only, use a Docker install. 
Install Docker on a single Linux host, and deploy Rancher with a single Docker container. --- + + + + Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md index 702fe09ed2fd..785d0ab268df 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md @@ -2,6 +2,10 @@ title: Security --- + + + +
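For the single-node Docker scenario described above, the installation generally comes down to one `docker run` command. A minimal sketch, assuming ports 80 and 443 are free on the Linux host; check the install reference for the flags and image tag that match your Rancher version:

```bash
# Start the Rancher server as a single container (illustrative tag; some
# releases require extra flags such as --privileged).
sudo docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  rancher/rancher:latest
```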
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md index c1a7384f881d..fad01664d4b2 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md @@ -2,5 +2,9 @@ title: Rancher Server Configuration --- + + + + - [RKE1 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [Use Existing Nodes](../pages-for-subheaders/use-existing-nodes.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md index b596439a73e7..07714a5dc9c3 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md @@ -2,6 +2,10 @@ title: Resources --- + + + + ### Docker Installations The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md index 84a0a38df515..cf1a8422f960 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md @@ -2,6 +2,10 @@ title: Setting up Cloud Providers --- + + + + A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md index 36eac035e841..c3b7b48766b1 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -2,6 +2,10 @@ title: Setting up Clusters from Hosted Kubernetes Providers --- + + + + In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. 
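When a cluster is launched with RKE, the cloud provider described above is enabled in the cluster configuration. A minimal sketch of the relevant `cluster.yml` fragment, assuming the in-tree AWS provider and that the nodes already have the required IAM roles:

```bash
# Append an illustrative cloud provider block to an RKE cluster.yml.
cat >> cluster.yml <<'EOF'
cloud_provider:
  name: aws
EOF
```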
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md index 61cd166a90c2..91072d2b3b44 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md @@ -2,4 +2,8 @@ title: Single Node Rancher in Docker --- + + + + The following docs will discuss [HTTP proxy configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) and [advanced options](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for Docker installs. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md index 0564cd40323b..d514f08ab761 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md @@ -3,6 +3,10 @@ title: Launching Kubernetes on Existing Custom Nodes description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements --- + + + + When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which includes some hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index 9c8844132693..a8251d53e03b 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider --- + + + + Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected.
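The node registration command mentioned above is generated by the Rancher UI together with a real token, CA checksum, and image tag. The sketch below only shows its general shape; every value is a placeholder:

```bash
# Illustrative shape of the command copied from the Rancher UI — do not
# run these placeholder values; copy the real command for your cluster.
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:<rancher-version> \
  --server https://<rancher-hostname> \
  --token <registration-token> \
  --ca-checksum <checksum> \
  --etcd --controlplane --worker
```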
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md index b68411202ca5..989f739c25c9 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on Windows Clusters --- + + + + _Available as of v2.3.0_ When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md index 2fa5a811b6f3..48fb0ed78620 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md @@ -2,6 +2,10 @@ title: User Settings --- + + + + Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. ![User Settings Menu](/img/user-settings.png) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md index 1f5a336162bb..e1ba4150bb78 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md @@ -2,6 +2,10 @@ title: Creating a vSphere Cluster description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md index 94ac0881d6d6..5cfe84668af6 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md @@ -3,6 +3,10 @@ title: "Kubernetes Workloads and Pods" description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" --- + + + + You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. 
### Pods diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md index bbea00badab5..24c351a051c2 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md @@ -2,6 +2,10 @@ title: Provisioning Drivers --- + + + + Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md index d4f4c7ab33a9..9e9c73875d51 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md @@ -2,6 +2,10 @@ title: RKE Templates --- + + + + RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md index 27570e73a410..e3f44f17f848 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md @@ -2,6 +2,10 @@ title: API --- + + + + ## How to use the API The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys](../reference-guides/user-settings/api-keys.md). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md index 2eba1efac1c3..fca157a83205 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md @@ -2,6 +2,10 @@ title: Cluster Access --- + + + + This section is about what tools can be used to access clusters managed by Rancher. 
For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md index 208abcf1ea87..87efa2a0f9e0 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md @@ -2,6 +2,10 @@ title: Advanced Configuration --- + + + + ### Alertmanager For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md index e4b8c16e1e4f..b5b3d2410372 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md @@ -2,6 +2,10 @@ title: Advanced User Guides --- + + + + Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working. It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md index cd300d3ed8d5..0871bfcb9096 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -2,6 +2,10 @@ title: Air Gapped Helm CLI Install --- + + + + This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container.
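One common way to access a Rancher-managed cluster from a workstation is to download the cluster's kubeconfig file from the Rancher UI and point `kubectl` at it. A minimal sketch, assuming the file was saved as `mycluster.yaml` (a hypothetical filename):

```bash
export KUBECONFIG=$HOME/Downloads/mycluster.yaml   # kubeconfig downloaded from Rancher
kubectl get nodes                                  # confirm the cluster is reachable
kubectl get pods --all-namespaces                  # list workloads across namespaces
```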
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md index dfa237a21039..3d6a59f954dd 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md @@ -2,4 +2,8 @@ title: Authentication Config --- + + + + In the following tutorials, you will learn how to [manage users and groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md), [create local users](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md), [configure Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md), [configure Active Directory (AD)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md), [configure OpenLDAP](../pages-for-subheaders/configure-openldap.md), [configure FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md), [configure Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md), [configure GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md), [configure Keycloak](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md), [configure PingIdentity (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md), [configure Okta (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md), [configure Shibboleth (SAML)](../pages-for-subheaders/configure-shibboleth-saml.md), and how to [configure Microsoft AD Federation Service (SAML)](../pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md). \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md index f893100e77c4..6836343e89a1 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -2,6 +2,10 @@ title: Authentication, Permissions and Global Configuration --- + + + + After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. 
## First Log In diff --git a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md index 01a2b36f67ea..25a2c7cdc3d1 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -3,6 +3,10 @@ title: Backups and Disaster Recovery keywords: [rancher v2.5 backup restore, rancher v2.5 backup and restore, backup restore rancher v2.5, backup and restore rancher v2.5] --- + + + + In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. As of Rancher v2.5, the `rancher-backup` operator is used to backup and restore Rancher. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.5/charts/rancher-backup) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md index c6574b936007..104584f741d5 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md @@ -2,6 +2,10 @@ title: Rancher Backup Configuration Reference --- + + + + - [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) - [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) - [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md index 4a3fa51d285a..845735f098ce 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md @@ -2,6 +2,10 @@ title: Best Practices Guide --- + + + + The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md index 381e90cfe076..d34772853ac8 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -2,6 +2,10 @@ title: Checklist for Production-Ready Clusters --- + + + + In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. 
For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md index 6fef05e5b7cc..e76d47504e64 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md @@ -2,6 +2,10 @@ title: CIS Scan Guides --- + + + + - [Install rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md) - [Uninstall rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md) - [Run a Scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md index 0e12fddea2a4..ccdde06ba20e 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md @@ -2,6 +2,10 @@ title: CIS Scans --- + + + + Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md index d4c33d7f390f..547d4c50308f 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md @@ -2,4 +2,8 @@ title: CLI with Rancher --- + + + + Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md). \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md index 8a65b275b36e..9013fe4a1eff 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md @@ -2,6 +2,10 @@ title: Cluster Configuration --- + + + + After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. 
For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md b/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md index 9e7ad6f0ccbd..be57a9921978 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md @@ -2,6 +2,10 @@ title: Configuration Options --- + + + + ### Egress Support By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index 549f70444ef1..b30bd2864827 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -2,6 +2,10 @@ title: Configuring Microsoft Active Directory Federation Service (SAML) --- + + + + If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. ## Prerequisites diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md index 68210668ce9f..b73b7fa8794a 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md @@ -2,6 +2,10 @@ title: Configuring OpenLDAP --- + + + + If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. ## Prerequisites diff --git a/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md index 9313c92dc3e8..1cee5519a8e2 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -3,6 +3,10 @@ title: "Kubernetes Persistent Storage: Volumes and Storage Classes" description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" --- + + + + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. 
For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md index 3358ebb80191..822d6e9550bd 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md @@ -2,6 +2,10 @@ title: Custom Resource Configuration --- + + + + The following Custom Resource Definitions are used to configure logging: - [Flow and ClusterFlow](../explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md index c61725a774da..337d1ff05fb5 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md @@ -2,6 +2,10 @@ title: Deploying Applications across Clusters --- + + + + ### Fleet Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md index 051799f2b834..51162752eaa1 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md @@ -2,6 +2,10 @@ title: Deploying Rancher Server --- + + + + Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md index 3e86165a071c..b2898cd513b7 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md @@ -2,6 +2,10 @@ title: Deploying Workloads --- + + + + These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md index 126f0472164a..8aec527bb92b 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md @@ -2,4 +2,8 @@ title: Downstream Cluster Configuration --- + + + + The following docs will discuss [node template configuration](./node-template-configuration.md). 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md index 96f3abdc36cc..7eb26938937e 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md @@ -2,6 +2,10 @@ title: Enabling Experimental Features --- + + + + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md index 13b700b10974..39d4188b98b9 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md @@ -2,6 +2,10 @@ title: GKE Cluster Configuration Reference --- + + + + diff --git a/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md index 38fb4e8c6190..7e71750965de 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md @@ -2,6 +2,10 @@ title: Helm Charts in Rancher --- + + + + In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. ### Changes in Rancher v2.5 diff --git a/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md index 5afecb6a3ae8..59fa82620247 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -3,6 +3,10 @@ title: The Horizontal Pod Autoscaler description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment --- + + + + The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md index f7b834992aae..fabdc72e9751 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md @@ -2,6 +2,10 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. 
--- + + + + To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md index cd0fac015c70..545d5a98b828 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md @@ -2,6 +2,10 @@ title: Cluster Autoscaler --- + + + + In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index 5d13b136b42e..fbf99b4eb34a 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -3,6 +3,10 @@ title: Install/Upgrade Rancher on a Kubernetes Cluster description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. ## Prerequisites diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md index 91d501aebd2d..46247c0e43a6 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md @@ -3,6 +3,10 @@ title: Installing/Upgrading Rancher description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. ## Terminology diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md index 0eeae0f6c4eb..625a8721e27d 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md @@ -2,4 +2,8 @@ title: Installation References --- + + + + Please see the following reference guides for other installation resources: [Rancher Helm chart options](../reference-guides/installation-references/helm-chart-options.md), [TLS settings](../reference-guides/installation-references/tls-settings.md), and [feature flags](../reference-guides/installation-references/feature-flags.md). 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md index fb199923affc..bb29ae3b641c 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md @@ -3,6 +3,10 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup --- + + + + This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. :::note Important: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md index 15c31d9b8fcb..e8c690c7f2c6 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md @@ -2,6 +2,10 @@ title: Setup Guide --- + + + + This section describes how to enable Istio and start using it in your projects. If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/istio.md b/versioned_docs/version-2.5/pages-for-subheaders/istio.md index b6477c46a28e..2b7c819eff92 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/istio.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/istio.md @@ -2,6 +2,10 @@ title: Istio --- + + + + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md index d4b5ef29e56c..941663ebf16d 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md @@ -2,6 +2,10 @@ title: "Don't have a Kubernetes cluster? Try one of these tutorials." --- + + + + This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. In Rancher v2.5, Rancher can run on any Kubernetes cluster. 
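The Helm CLI install referenced above boils down to adding the Rancher chart repository and installing the chart into the `cattle-system` namespace. A minimal sketch, assuming cert-manager is already installed and that `rancher.my.org` (an illustrative hostname) resolves to the cluster; see the Helm chart options reference for the full set of values:

```bash
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
kubectl create namespace cattle-system
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org   # illustrative hostname
```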
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index 11f6f7f88489..94dbfbacc0a0 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -3,6 +3,10 @@ title: Setting up Kubernetes Clusters in Rancher description: Provisioning Kubernetes Clusters --- + + + + Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md index b2271a509093..db3dfc08ca00 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md @@ -2,6 +2,10 @@ title: Kubernetes Components --- + + + + The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. This section includes troubleshooting tips in the following categories: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md index bfb36818d950..050200918dbd 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + > The Cluster Explorer is a new feature in Rancher v2.5 that allows you to view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. This section will be updated to reflect the way that Kubernetes resources are handled in Rancher v2.5. ## Workloads diff --git a/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md index b5910af9b699..43d77fb5387f 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -2,6 +2,10 @@ title: Launching Kubernetes with Rancher --- + + + + You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. 
It can launch Kubernetes on any computers, including: - Bare-metal servers diff --git a/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md index 77a7838cde80..1f7dcc638ded 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -3,6 +3,10 @@ title: Set Up Load Balancer and Ingress Controller within Rancher description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers --- + + + + Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. ## Load Balancers diff --git a/versioned_docs/version-2.5/pages-for-subheaders/logging.md b/versioned_docs/version-2.5/pages-for-subheaders/logging.md index 8510828a6994..152e66d3f9b4 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/logging.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/logging.md @@ -3,6 +3,10 @@ title: Rancher Integration with Logging Services description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. --- + + + + The [Logging operator](https://kube-logging.github.io/docs/) now powers Rancher's logging solution in place of the former, in-house solution. For an overview of the changes in v2.5, see [this section.](../explanations/integrations-in-rancher/logging/logging-architecture.md#changes-in-rancher-v25) For information about migrating from Logging V1, see [this page.](../explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md index 67941b22779e..0f4bf0f93a69 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md @@ -2,6 +2,10 @@ title: Cluster Administration --- + + + + After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. This page covers the following topics: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md index 7d452fcb4235..976e2c218815 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md @@ -2,6 +2,10 @@ title: Project Resource Quotas --- + + + + In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. This page is a how-to guide for creating resource quotas in existing projects. 
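The `manage-project-resource-quotas` page in the hunk above describes limiting CPU, memory, and Kubernetes object counts per project or namespace. Project quotas are configured in the Rancher UI, but they are ultimately enforced through namespace-level Kubernetes `ResourceQuota` objects, so a sketch of such an object is shown below for orientation; the namespace name and the limit values are illustrative assumptions, not values from the docs.

```bash
# Illustrative only: a namespace-level ResourceQuota comparable to what a Rancher
# project quota propagates to its namespaces. The namespace and limits are made up.
kubectl create namespace team-a

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: ResourceQuota
metadata:
  name: team-a-quota
  namespace: team-a
spec:
  hard:
    requests.cpu: "2"            # total CPU requested across the namespace
    requests.memory: 4Gi         # total memory requested across the namespace
    persistentvolumeclaims: "5"  # cap on PVC objects
    pods: "20"                   # cap on pod objects
EOF

# Inspect current usage against the quota.
kubectl -n team-a describe resourcequota team-a-quota
```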
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md index 17864ca8720b..c1f17072e44b 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md @@ -2,6 +2,10 @@ title: Project Administration --- + + + + _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. In terms of hierarchy: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md index 8d63e7ad740a..921cb745cc20 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -2,6 +2,10 @@ title: Role-Based Access Control (RBAC) --- + + + + Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page changes. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md index ce9f991fc844..37821f2bba5c 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md @@ -2,6 +2,10 @@ title: Monitoring Guides --- + + + + - [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) - [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) - [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md index 162b7f675cf0..6e1dbf474816 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md @@ -3,6 +3,10 @@ title: Monitoring and Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring --- + + + + Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. 
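The `monitoring-and-alerting` page in the hunk above introduces the `rancher-monitoring` application. It is normally installed from the Rancher UI, but the chart can also be installed with the Helm CLI; the sketch below assumes the `rancher-charts` repository at `https://charts.rancher.io`, the companion `rancher-monitoring-crd` chart, and the `cattle-monitoring-system` namespace, so verify those details against your Rancher version before relying on it.

```bash
# Sketch of a CLI install of Rancher's monitoring stack; repo URL, chart names,
# and namespace are assumptions to verify against your Rancher release.
helm repo add rancher-charts https://charts.rancher.io
helm repo update

kubectl create namespace cattle-monitoring-system

# The CRD chart is installed first, then the main chart.
helm install rancher-monitoring-crd rancher-charts/rancher-monitoring-crd \
  --namespace cattle-monitoring-system
helm install rancher-monitoring rancher-charts/rancher-monitoring \
  --namespace cattle-monitoring-system
```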
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md index 2b588bf2d15f..087be0a76799 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -2,6 +2,10 @@ title: Configuration --- + + + + This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md index abf493b4487a..79f97d9513d8 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md @@ -2,6 +2,10 @@ title: Monitoring V2 Configuration --- + + + + The following sections will explain important options essential to configuring Monitoring V2 in Rancher: - [Receiver Configuration](../reference-guides/monitoring-v2-configuration/receivers.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md index e2e667395c3b..e2e68ff0089a 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md @@ -2,6 +2,10 @@ title: New User Guides --- + + + + New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users in doing the work and then getting the promised results immediately.
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md index 0777e7329383..009b421fe17b 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md @@ -2,4 +2,8 @@ title: Node Template Configuration --- + + + + To learn about node template config, refer to [EC2 Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md), [DigitalOcean Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md), [Azure Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md), and [vSphere Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md index e765865e4f97..2844dc4ff848 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md @@ -2,6 +2,10 @@ title: Other Installation Methods --- + + + + ### Air Gapped Installations Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md index 72d452efcc15..ea726b07c3ca 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md @@ -2,6 +2,10 @@ title: Provisioning Storage Examples --- + + + + Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: diff --git a/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md index 1e83fb023f01..723c870846a3 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,10 @@ --- title: Rancher Deployment Quick Start Guides --- + + + + >**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future.
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md index 59a7eec2dba7..fd8a41b8e086 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -2,6 +2,10 @@ title: Installing Rancher behind an HTTP Proxy --- + + + + In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md index 2fc25c09150c..2cdb03fd9094 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md @@ -2,6 +2,10 @@ title: Best Practices for Rancher Managed Clusters --- + + + + ### Logging Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md index a9295751cabf..4947011c6f39 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md @@ -2,6 +2,10 @@ title: Architecture --- + + + + This section focuses on the [Rancher server and its components](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) and how [Rancher communicates with downstream Kubernetes clusters](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md). For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index 262ae12f7045..d6b6a34ae9d4 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -3,6 +3,10 @@ title: Installing Rancher on a Single Node Using Docker description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. --- + + + + Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. 
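The `rancher-on-a-single-node-with-docker` page in the hunk above ends by noting that Rancher is deployed with a single Docker container. A minimal sketch of that `docker run` invocation follows; the port mappings and `--privileged` flag reflect the commonly documented form, but the image tag is a placeholder and the authoritative flags should come from the install docs for your version.

```bash
# Minimal sketch of a single-node Rancher install with Docker (testing/dev only).
# Ports 80/443 expose the Rancher UI and API; --privileged is required on recent versions.
# The :latest tag is illustrative; pin a specific version in practice.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest

# Follow the container logs until Rancher reports that it is listening.
docker logs -f "$(docker ps -q --filter ancestor=rancher/rancher:latest)"
```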
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md index 1eebde802059..eda3d209eacf 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md @@ -2,6 +2,10 @@ title: Security --- + + + +
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md index a8e1a880ea77..eac8ae244eb0 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md @@ -2,6 +2,10 @@ title: Rancher Server Configuration --- + + + + - [RKE1 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [EKS Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md) - [GKE Cluster Configuration](../pages-for-subheaders/gke-cluster-configuration.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md index c8984d9c447b..45c3917cd588 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md @@ -2,6 +2,10 @@ title: Best Practices for the Rancher Server --- + + + + This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. ### Recommended Architecture and Infrastructure diff --git a/versioned_docs/version-2.5/pages-for-subheaders/resources.md b/versioned_docs/version-2.5/pages-for-subheaders/resources.md index 5954506a0dc4..e2a0a7a4c60e 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/resources.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/resources.md @@ -2,6 +2,10 @@ title: Resources --- + + + + ### Docker Installations The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md b/versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md index 0d14f11e8bae..3f197205cee5 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md @@ -2,6 +2,10 @@ title: SELinux RPM --- + + + + _Available as of v2.5.8_ [Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md index 72fa67ca0c64..47032b5b3c99 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md @@ -2,6 +2,10 @@ title: Setting up Cloud Providers --- + + + + A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. 
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md index edb8cc1b211a..f1805065aa65 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -2,6 +2,10 @@ title: Setting up Clusters from Hosted Kubernetes Providers --- + + + + In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md index 61cd166a90c2..91072d2b3b44 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md @@ -2,4 +2,8 @@ title: Single Node Rancher in Docker --- + + + + The following docs will discuss [HTTP proxy configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) and [advanced options](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for Docker installs. \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md index cd891838b350..5e5b26bc2755 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md @@ -3,6 +3,10 @@ title: Launching Kubernetes on Existing Custom Nodes description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements --- + + + + When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which include some hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node.
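The `use-existing-nodes` page in the hunk above says each server is turned into a node by running a command generated in the Rancher UI. Purely to illustrate the general shape of that command, a hedged sketch follows; the real image tag, server URL, token, checksum, and role flags are produced by Rancher for your specific cluster and must be copied from the UI rather than from this example.

```bash
# Illustrative shape only: the node registration command for a custom cluster.
# Every angle-bracketed value is a placeholder that the Rancher UI fills in.
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes \
  -v /var/run:/var/run \
  rancher/rancher-agent:<version> \
  --server https://<rancher-server-url> \
  --token <registration-token> \
  --ca-checksum <checksum> \
  --etcd --controlplane --worker   # keep only the roles this node should hold
```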
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index 8282dbd7961b..a5c01718b4fa 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider --- + + + + Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md index 80b2bbc49300..337756ff27fe 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on Windows Clusters --- + + + + When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md index 9b5db2f32784..d81f2da2fd1d 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md @@ -2,6 +2,10 @@ title: User Settings --- + + + + Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. ![User Settings Menu](/img/user-settings.png) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md index 89dc130a0be7..301815e1743b 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md @@ -2,6 +2,10 @@ title: Creating a vSphere Cluster description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. 
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md index 94ac0881d6d6..5cfe84668af6 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md @@ -3,6 +3,10 @@ title: "Kubernetes Workloads and Pods" description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" --- + + + + You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. ### Pods diff --git a/versioned_docs/version-2.6/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.6/pages-for-subheaders/about-provisioning-drivers.md index 02ab3b0bc03f..812197b3b3f8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/about-provisioning-drivers.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/about-provisioning-drivers.md @@ -2,6 +2,10 @@ title: Provisioning Drivers --- + + + + Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers diff --git a/versioned_docs/version-2.6/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.6/pages-for-subheaders/about-rke1-templates.md index 44e73fd794aa..601a622a5814 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/about-rke1-templates.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/about-rke1-templates.md @@ -2,6 +2,10 @@ title: RKE Templates --- + + + + RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.6/pages-for-subheaders/about-the-api.md index 4ac4ae8fcf6a..3b39d7c27172 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/about-the-api.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/about-the-api.md @@ -2,6 +2,10 @@ title: API --- + + + + ## How to use the API The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/access-clusters.md index 2dd82ffcb8a7..04edb86702c0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/access-clusters.md @@ -2,6 +2,10 @@ title: Cluster Access --- + + + + This section is about what tools can be used to access clusters managed by Rancher. 
For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/advanced-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/advanced-configuration.md index 208abcf1ea87..87efa2a0f9e0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/advanced-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/advanced-configuration.md @@ -2,6 +2,10 @@ title: Advanced Configuration --- + + + + ### Alertmanager For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/advanced-user-guides.md index e4b8c16e1e4f..b5b3d2410372 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/advanced-user-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/advanced-user-guides.md @@ -2,6 +2,10 @@ title: Advanced User Guides --- + + + + Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working. It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.6/pages-for-subheaders/air-gapped-helm-cli-install.md index 058b697d93d6..d6fbc09698fd 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/air-gapped-helm-cli-install.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -2,6 +2,10 @@ title: Air-Gapped Helm CLI Install --- + + + + This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.6/pages-for-subheaders/authentication-config.md index d28bfae8edbc..c1458fb47384 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/authentication-config.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/authentication-config.md @@ -3,6 +3,10 @@ title: Authentication Config weight: 10 --- + + + + One of the key features that Rancher adds to Kubernetes is centralized user authentication.
This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/authentication-permissions-and-global-configuration.md index b32d8068a916..2df94806af94 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -2,6 +2,10 @@ title: Authentication, Permissions and Global Configuration --- + + + + After installation, the [system administrator](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to set up authentication, authorization, security, default settings, security policies, drivers and global DNS entries. ## First Log In diff --git a/versioned_docs/version-2.6/pages-for-subheaders/aws-cloud-marketplace.md b/versioned_docs/version-2.6/pages-for-subheaders/aws-cloud-marketplace.md index 0b4c81853429..c487e1d6f928 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/aws-cloud-marketplace.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/aws-cloud-marketplace.md @@ -2,6 +2,10 @@ title: AWS Marketplace Integration --- + + + + ## Overview Rancher offers an integration with the AWS Marketplace which allows users to purchase a support contract with SUSE. This integration allows you to easily adjust your support needs as you start to support more clusters. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-and-disaster-recovery.md index 734d97722cae..c9ba92fb3e53 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-and-disaster-recovery.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -3,6 +3,10 @@ title: Backups and Disaster Recovery keywords: [rancher v2.6 backup restore, rancher v2.6 backup and restore, backup restore rancher v2.6, backup and restore rancher v2.6] --- + + + + In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. The `rancher-backup` operator is used to back up and restore Rancher on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** (Rancher before v2.6.5) or **Apps** (Rancher v2.6.5+) page, or by using the Helm CLI.
The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-configuration.md index c6574b936007..104584f741d5 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/backup-restore-configuration.md @@ -2,6 +2,10 @@ title: Rancher Backup Configuration Reference --- + + + + - [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) - [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) - [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.6/pages-for-subheaders/best-practices.md index 81f14325f765..7009f6cce709 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/best-practices.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/best-practices.md @@ -2,6 +2,10 @@ title: Best Practices Guide --- + + + + The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md index 64b0c8a37cfb..f5816af3c48b 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -2,6 +2,10 @@ title: Checklist for Production-Ready Clusters --- + + + + In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. 
For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/cis-scan-guides.md index 6fef05e5b7cc..e76d47504e64 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/cis-scan-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/cis-scan-guides.md @@ -2,6 +2,10 @@ title: CIS Scan Guides --- + + + + - [Install rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md) - [Uninstall rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md) - [Run a Scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.6/pages-for-subheaders/cis-scans.md index 9b29c4b542fc..1a59c999988c 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/cis-scans.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/cis-scans.md @@ -2,6 +2,10 @@ title: CIS Scans --- + + + + Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The CIS scans can run on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.6/pages-for-subheaders/cli-with-rancher.md index d4c33d7f390f..547d4c50308f 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/cli-with-rancher.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/cli-with-rancher.md @@ -2,4 +2,8 @@ title: CLI with Rancher --- + + + + Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md). \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/cloud-marketplace.md b/versioned_docs/version-2.6/pages-for-subheaders/cloud-marketplace.md index 2637dd78873c..3baacc0d602d 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/cloud-marketplace.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/cloud-marketplace.md @@ -2,6 +2,10 @@ title: Cloud Marketplace Integration --- + + + + Rancher offers integration with cloud marketplaces to easily purchase support for installations hosted on certain cloud providers. In addition, this integration also provides the ability to generate a supportconfig bundle which can be provided to rancher support. As of Rancher v2.6.7, only AWS is supported for this integration. 
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/cluster-configuration.md index 50f96d112586..60e02a8cd40b 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/cluster-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/cluster-configuration.md @@ -2,6 +2,10 @@ title: Cluster Configuration --- + + + + After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. For information on editing cluster membership, go to [this page.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/configuration-options.md b/versioned_docs/version-2.6/pages-for-subheaders/configuration-options.md index 15a0599ab000..92a375e948db 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/configuration-options.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/configuration-options.md @@ -2,6 +2,10 @@ title: Configuration Options --- + + + + ### Egress Support By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). diff --git a/versioned_docs/version-2.6/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.6/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index 73c2c651cd6a..8662bf782fb0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -2,6 +2,10 @@ title: Configuring Microsoft Active Directory Federation Service (SAML) --- + + + + If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. ## Prerequisites diff --git a/versioned_docs/version-2.6/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.6/pages-for-subheaders/configure-openldap.md index be2aa86df697..9eb5fc7db2ac 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/configure-openldap.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/configure-openldap.md @@ -2,6 +2,10 @@ title: Configuring OpenLDAP --- + + + + If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. 
## Prerequisites diff --git a/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md index 8952d8abafa9..cdb775108ca6 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,10 @@ title: "Kubernetes Persistent Storage: Volumes and Storage Classes" description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" --- + + + + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/custom-resource-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/custom-resource-configuration.md index cf43951e5b36..37c2f337d78c 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/custom-resource-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/custom-resource-configuration.md @@ -2,6 +2,10 @@ title: Custom Resource Configuration --- + + + + The following Custom Resource Definitions are used to configure logging: - [Flow and ClusterFlow](../integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md index 3215a4f43b4d..8089f7a914dc 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,10 @@ --- title: Deploying Applications across Clusters --- + + + + ### Fleet Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-manager.md index e5ad6602e641..9914926b2d6f 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-manager.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-manager.md @@ -2,6 +2,10 @@ title: Deploying Rancher Server --- + + + + Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. 
- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-workloads.md index 3e86165a071c..b2898cd513b7 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-workloads.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/deploy-rancher-workloads.md @@ -2,6 +2,10 @@ title: Deploying Workloads --- + + + + These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/downstream-cluster-configuration.md index e9065f1fb088..b9fbad0b9665 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/downstream-cluster-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/downstream-cluster-configuration.md @@ -2,4 +2,8 @@ title: Downstream Cluster Configuration --- + + + + The following docs will discuss [node template configuration](./node-template-configuration.md) and [machine configuration](./machine-configuration.md). \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md index f81d240f0fcd..5626965df48d 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,10 @@ --- title: Enabling Experimental Features --- + + + + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/gke-cluster-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/gke-cluster-configuration.md index 0207beafee7c..c627ea2bf95f 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/gke-cluster-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/gke-cluster-configuration.md @@ -2,6 +2,10 @@ title: GKE Cluster Configuration Reference --- + + + + ## Changes in Rancher v2.6 - Support for additional configuration options: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.6/pages-for-subheaders/helm-charts-in-rancher.md index 156ca3e29cfb..fc0d7e41f5dd 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/helm-charts-in-rancher.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/helm-charts-in-rancher.md @@ -2,6 +2,10 @@ title: Helm Charts in Rancher --- + + + + In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. Helm chart repositories are managed using **Apps & Marketplace** (Rancher before v2.6.5) or **Apps** (Rancher v2.6.5+). 
It uses a catalog-like system to import bundles of charts from repositories and then uses those charts to either deploy custom Helm applications or Rancher's tools such as Monitoring or Istio. Rancher tools come as pre-loaded repositories which deploy as standalone Helm charts. Any additional repositories are only added to the current cluster. ### Changes in Rancher v2.6 diff --git a/versioned_docs/version-2.6/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.6/pages-for-subheaders/horizontal-pod-autoscaler.md index 64d553553154..d18ce147d060 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/horizontal-pod-autoscaler.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -3,6 +3,10 @@ title: The Horizontal Pod Autoscaler description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment --- + + + + The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.6/pages-for-subheaders/infrastructure-setup.md index f7b834992aae..fabdc72e9751 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/infrastructure-setup.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/infrastructure-setup.md @@ -2,6 +2,10 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. --- + + + + To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.6/pages-for-subheaders/install-cluster-autoscaler.md index 740348651316..8b749aae5ee0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/install-cluster-autoscaler.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/install-cluster-autoscaler.md @@ -2,6 +2,10 @@ title: Cluster Autoscaler --- + + + + In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.6/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index a1a8a8b69ae2..cccb399eeae4 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -3,6 +3,10 @@ title: Install/Upgrade Rancher on a Kubernetes Cluster description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. 
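The `install-upgrade-on-a-kubernetes-cluster` page in the hunk above introduces deploying Rancher with the Helm CLI. The sketch below shows the usual shape of that install; the repository URL, the `cattle-system` namespace, and the `hostname`/`bootstrapPassword` chart values follow the commonly documented pattern, and TLS handling (for example via cert-manager) is assumed to be set up separately, so treat this as an outline rather than a complete procedure.

```bash
# Sketch of installing Rancher on an existing Kubernetes cluster with Helm.
# Assumes kubectl and helm already point at the target cluster and that
# certificate handling (e.g., cert-manager) is dealt with separately.
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update

kubectl create namespace cattle-system

helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set bootstrapPassword='change-me-now'   # placeholder hostname and password

# Wait for the rollout to finish before logging in at https://rancher.example.com
kubectl -n cattle-system rollout status deploy/rancher
```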
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.6/pages-for-subheaders/installation-and-upgrade.md index 032c76e24d52..3077b14edc28 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/installation-and-upgrade.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/installation-and-upgrade.md @@ -3,6 +3,10 @@ title: Installing/Upgrading Rancher description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. ## Terminology diff --git a/versioned_docs/version-2.6/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.6/pages-for-subheaders/installation-references.md index e2048279af00..6108728b04fa 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/installation-references.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/installation-references.md @@ -2,4 +2,8 @@ title: Installation References --- + + + + Please see the following reference guides for other installation resources: [Rancher Helm chart options](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md), [TLS settings](../getting-started/installation-and-upgrade/installation-references/tls-settings.md), and [feature flags](../getting-started/installation-and-upgrade/installation-references/feature-flags.md). \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.6/pages-for-subheaders/installation-requirements.md index f56751377183..1f06f1167b6f 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/installation-requirements.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/installation-requirements.md @@ -3,6 +3,10 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup --- + + + + This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. :::note Important: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.6/pages-for-subheaders/istio-setup-guide.md index 2c6ba0c41189..24475f7ffea7 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/istio-setup-guide.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/istio-setup-guide.md @@ -2,6 +2,10 @@ title: Setup Guide --- + + + + This section describes how to enable Istio and start using it in your projects. If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/istio.md b/versioned_docs/version-2.6/pages-for-subheaders/istio.md index bfbf7bb0a99c..50c9e616ebe8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/istio.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/istio.md @@ -2,6 +2,10 @@ title: Istio --- + + + + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-cluster-setup.md b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-cluster-setup.md index 8f9a88e0da3d..9291d55bee45 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-cluster-setup.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-cluster-setup.md @@ -2,6 +2,10 @@ title: "Don't have a Kubernetes cluster? Try one of these tutorials." --- + + + + This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. Rancher can run on any Kubernetes cluster. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index b7f2f0a3bb0b..899d4bb59375 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -3,6 +3,10 @@ title: Setting up Kubernetes Clusters in Rancher description: Provisioning Kubernetes Clusters --- + + + + Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-components.md index 5e482f2a1e85..f048b5ba19bf 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-components.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-components.md @@ -2,6 +2,10 @@ title: Kubernetes Components --- + + + + The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. 
This section includes troubleshooting tips in the following categories: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-resources-setup.md index 863cca3f34fc..3b84270592cc 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-resources-setup.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/kubernetes-resources-setup.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + You can view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. ## Workloads diff --git a/versioned_docs/version-2.6/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.6/pages-for-subheaders/launch-kubernetes-with-rancher.md index fba8d3aeb668..5b7f4363bab6 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/launch-kubernetes-with-rancher.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -2,6 +2,10 @@ title: Launching Kubernetes with Rancher --- + + + + You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, you can choose between [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) or [RKE2](https://docs.rke2.io) distributions. Rancher can launch Kubernetes on any computers, including: - Bare-metal servers diff --git a/versioned_docs/version-2.6/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.6/pages-for-subheaders/load-balancer-and-ingress-controller.md index ea5d34fed342..41bdf40a3234 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/load-balancer-and-ingress-controller.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -3,6 +3,10 @@ title: Set Up Load Balancer and Ingress Controller within Rancher description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers --- + + + + Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. ## Load Balancers diff --git a/versioned_docs/version-2.6/pages-for-subheaders/logging.md b/versioned_docs/version-2.6/pages-for-subheaders/logging.md index 12a8be0d3a0a..4ed1084dd100 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/logging.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/logging.md @@ -3,6 +3,10 @@ title: Rancher Integration with Logging Services description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. --- + + + + The [Logging operator](https://kube-logging.github.io/docs/) now powers Rancher's logging solution in place of the former, in-house solution. ## Enabling Logging diff --git a/versioned_docs/version-2.6/pages-for-subheaders/machine-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/machine-configuration.md index 18ec97e5f193..e1b9bb72f0a4 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/machine-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/machine-configuration.md @@ -2,4 +2,8 @@ title: Machine Configuration --- + + + + Machine configuration is the arrangement of resources assigned to a virtual machine. 
Please see the docs for [Amazon EC2](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md), [DigitalOcean](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md), and [Azure](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md) to learn more. \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/manage-clusters.md index 627df4c34eaf..5494fc037093 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/manage-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/manage-clusters.md @@ -2,6 +2,10 @@ title: Cluster Administration --- + + + + After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. :::note diff --git a/versioned_docs/version-2.6/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.6/pages-for-subheaders/manage-project-resource-quotas.md index f6023c0f6089..fa6f10ec84c8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/manage-project-resource-quotas.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/manage-project-resource-quotas.md @@ -2,6 +2,10 @@ title: Project Resource Quotas --- + + + + In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. This page is a how-to guide for creating resource quotas in existing projects. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.6/pages-for-subheaders/manage-projects.md index 3c421c9b10da..fdb12ee544c0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/manage-projects.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/manage-projects.md @@ -2,6 +2,10 @@ title: Project Administration --- + + + + _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. In terms of hierarchy: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.6/pages-for-subheaders/manage-role-based-access-control-rbac.md index b2674b753b9e..c6c87eff5fc9 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/manage-role-based-access-control-rbac.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -2,6 +2,10 @@ title: Role-Based Access Control (RBAC) --- + + + + Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](authentication-config.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page change.
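As a small follow-up to the project resource quota overview above: once Rancher propagates a quota into a project's namespaces, it appears as an ordinary Kubernetes `ResourceQuota` object, so you can inspect it with plain kubectl (the namespace name below is a placeholder):

```bash
# List and describe the quota Rancher created in one of the project's namespaces
kubectl -n my-project-namespace get resourcequota
kubectl -n my-project-namespace describe resourcequota
```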
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-alerting-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-alerting-guides.md index ce9f991fc844..37821f2bba5c 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-alerting-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-alerting-guides.md @@ -2,6 +2,10 @@ title: Monitoring Guides --- + + + + - [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) - [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) - [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-and-alerting.md index fc5684035ca0..f47fca300a71 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-and-alerting.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-and-alerting.md @@ -3,6 +3,10 @@ title: Monitoring and Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring --- + + + + Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration-guides.md index 443e84dfd96c..61453b5a8cd0 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -2,6 +2,10 @@ title: Configuration --- + + + + This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration.
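Assuming Monitoring V2 is installed, the Prometheus Operator custom resources mentioned above are regular Kubernetes objects, so a quick way to see what is already configured is:

```bash
# List the operator's custom resources across all namespaces
kubectl get servicemonitors,podmonitors,prometheusrules --all-namespaces
```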
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration.md index abf493b4487a..79f97d9513d8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/monitoring-v2-configuration.md @@ -2,6 +2,10 @@ title: Monitoring V2 Configuration --- + + + + The following sections will explain important options essential to configuring Monitoring V2 in Rancher: - [Receiver Configuration](../reference-guides/monitoring-v2-configuration/receivers.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/new-user-guides.md index e2e667395c3b..e2e68ff0089a 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/new-user-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/new-user-guides.md @@ -2,6 +2,10 @@ title: New User Guides --- + + + + New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users to do work to then get the promised results immediately. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/node-template-configuration.md index 0fff7eb9835b..e6c22d5e8526 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/node-template-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/node-template-configuration.md @@ -2,4 +2,8 @@ title: Node Template Configuration --- + + + + To learn about node template config, refer to [EC2 Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md), [DigitalOcean Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md), [Azure Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md), [vSphere Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md), and [Nutanix Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md). diff --git a/versioned_docs/version-2.6/pages-for-subheaders/nutanix.md b/versioned_docs/version-2.6/pages-for-subheaders/nutanix.md index 706ecbae214b..b626cf2bd12b 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/nutanix.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/nutanix.md @@ -3,6 +3,10 @@ title: Creating a Nutanix AOS Cluster description: Use Rancher to create a Nutanix AOS (AHV) cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. 
--- + + + + [Nutanix Acropolis Operating System](https://www.nutanix.com/products/acropolis) (Nutanix AOS) is an operating system for the Nutanix hyper-converged infrastructure platform. AOS comes with a built-in hypervisor called [Acropolis Hypervisor](https://www.nutanix.com/products/ahv), or AHV. By using Rancher with Nutanix AOS (AHV), you can bring cloud operations on-premises. Rancher can provision nodes in AOS (AHV) and install Kubernetes on them. When creating a Kubernetes cluster in AOS, Rancher first provisions the specified number of virtual machines by communicating with the Prism Central API. Then it installs Kubernetes on top of the VMs. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.6/pages-for-subheaders/other-installation-methods.md index effd2424c191..7cd497a8d489 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/other-installation-methods.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/other-installation-methods.md @@ -2,6 +2,10 @@ title: Other Installation Methods --- + + + + ### Air Gapped Installations Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator-guides.md index 0f82f71e1ada..2d1c0ae82246 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator-guides.md @@ -2,6 +2,10 @@ title: Prometheus Federator Guides --- + + + + - [Enable Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md) - [Uninstall Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md) - [Customize Grafana Dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator.md b/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator.md index 918d0b0abea9..efef5f5abae8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/prometheus-federator.md @@ -2,6 +2,10 @@ title: Prometheus Federator --- + + + + Prometheus Federator, also referred to as Project Monitoring v2, deploys a Helm Project Operator (based on the [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts each containing a Project Monitoring Stack, where each stack contains: - [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.6/pages-for-subheaders/provisioning-storage-examples.md index 7dcd3960ae51..44fd9593fba5 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/provisioning-storage-examples.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/provisioning-storage-examples.md @@ -2,6 +2,10 @@ title: Provisioning Storage Examples --- + + + + Rancher supports persistent storage with a variety of 
volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.6/pages-for-subheaders/quick-start-guides.md index c0010d438507..8a7f8028dfbc 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/quick-start-guides.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,10 @@ --- title: Rancher Deployment Quick Start Guides --- + + + + :::caution The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-behind-an-http-proxy.md index 59a7eec2dba7..fd8a41b8e086 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-behind-an-http-proxy.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -2,6 +2,10 @@ title: Installing Rancher behind an HTTP Proxy --- + + + + In a lot of enterprise environments, servers or VMs running on-premises do not have direct Internet access, but must connect to external services through an HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-managed-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-managed-clusters.md index 2fc25c09150c..2cdb03fd9094 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-managed-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-managed-clusters.md @@ -2,6 +2,10 @@ title: Best Practices for Rancher Managed Clusters --- + + + + ### Logging Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-manager-architecture.md index 03ec604e3f66..d7e76f285739 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-manager-architecture.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-manager-architecture.md @@ -2,6 +2,10 @@ title: Architecture --- + + + + This section focuses on the [Rancher server and its components](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) and how [Rancher communicates with downstream Kubernetes clusters](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md).
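For the HTTP-proxy install tutorial mentioned above, here is a hedged sketch of how proxy settings are typically passed to the Rancher chart. The chart exposes `proxy` and `noProxy` values; the proxy address and exclusion list below are placeholders, and commas inside `--set` values must be escaped:

```bash
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set proxy=http://proxy.example.com:8888 \
  --set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local"
```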
For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index 339dfc856a0a..da5b39209cfe 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-on-a-single-node-with-docker.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -3,6 +3,10 @@ title: Installing Rancher on a Single Node Using Docker description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. --- + + + + Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-security.md index 389a79672ff8..f1e31f322f01 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-security.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-security.md @@ -2,6 +2,10 @@ title: Security --- + + + +
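A minimal sketch of the single-node Docker install described above (the image tag and published ports are illustrative; recent Rancher versions require `--privileged` because the container runs its own embedded K3s):

```bash
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest
```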
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-server-configuration.md index 0892fb94db35..5e18f69e7406 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-server-configuration.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-server-configuration.md @@ -2,6 +2,10 @@ title: Rancher Server Configuration --- + + + + - [RKE1 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [RKE2 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) - [K3s Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/rancher-server.md b/versioned_docs/version-2.6/pages-for-subheaders/rancher-server.md index c8984d9c447b..45c3917cd588 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/rancher-server.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/rancher-server.md @@ -2,6 +2,10 @@ title: Best Practices for the Rancher Server --- + + + + This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. ### Recommended Architecture and Infrastructure diff --git a/versioned_docs/version-2.6/pages-for-subheaders/resources.md b/versioned_docs/version-2.6/pages-for-subheaders/resources.md index 57f6ee1e7b62..52e61353441b 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/resources.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/resources.md @@ -2,6 +2,10 @@ title: Resources --- + + + + ### Docker Installations The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users who want to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/selinux-rpm.md b/versioned_docs/version-2.6/pages-for-subheaders/selinux-rpm.md index 0b06703cf32d..c72c72ff56a7 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/selinux-rpm.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/selinux-rpm.md @@ -2,6 +2,10 @@ title: SELinux RPM --- + + + + [Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. Developed by Red Hat, it is an implementation of mandatory access controls (MAC) on Linux. Mandatory access controls allow an administrator of a system to define how applications and users can access different resources such as files, devices, networks and inter-process communication. SELinux also enhances security by making an OS restrictive by default. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.6/pages-for-subheaders/set-up-cloud-providers.md index 4bf3356c2671..9a02515dba97 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/set-up-cloud-providers.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/set-up-cloud-providers.md @@ -2,6 +2,10 @@ title: Setting up Cloud Providers --- + + + + A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes.
When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.6/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md index 001533601639..fd4f4ed5bda7 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -2,6 +2,10 @@ title: Setting up Clusters from Hosted Kubernetes Providers --- + + + + In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.6/pages-for-subheaders/single-node-rancher-in-docker.md index 61cd166a90c2..91072d2b3b44 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/single-node-rancher-in-docker.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/single-node-rancher-in-docker.md @@ -2,4 +2,8 @@ title: Single Node Rancher in Docker --- + + + + The following docs will discuss [HTTP proxy configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) and [advanced options](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for Docker installs. \ No newline at end of file diff --git a/versioned_docs/version-2.6/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.6/pages-for-subheaders/use-existing-nodes.md index d0c301bdc32e..2aeb05bc488e 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/use-existing-nodes.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/use-existing-nodes.md @@ -3,6 +3,10 @@ title: Launching Kubernetes on Existing Custom Nodes description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements --- + + + + When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which include some hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node.
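The registration command referred to above is generated by the Rancher UI with a cluster-specific token, so the sketch below only shows its general shape. The image tag, server URL, token, and checksum are placeholders; always copy the real command from the UI:

```bash
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:v2.6.13 \
  --server https://rancher.example.com \
  --token abcdef1234567890 \
  --ca-checksum 0123456789abcdef \
  --etcd --controlplane --worker
```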
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.6/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index 51c0036055a7..ffd0d85d239d 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider --- + + + + When you create an RKE or RKE2 cluster using a node template in Rancher, each resulting node pool is shown in a new **Machine Pools** tab. You can see the machine pools by doing the following: 1. Click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.6/pages-for-subheaders/use-windows-clusters.md index 4c4d19538a0a..fcdcdd8949ac 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/use-windows-clusters.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/use-windows-clusters.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on Windows Clusters --- + + + + When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.6/pages-for-subheaders/user-settings.md index db2376f3df89..a9ed1c72d923 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/user-settings.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/user-settings.md @@ -2,6 +2,10 @@ title: User Settings --- + + + + Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. ![User Settings Menu](/img/user-settings.png) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md index 8d5cac9d7b89..70eea8bfd52c 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md @@ -2,6 +2,10 @@ title: Creating a vSphere Cluster description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. 
diff --git a/versioned_docs/version-2.6/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.6/pages-for-subheaders/workloads-and-pods.md index 94ac0881d6d6..5cfe84668af6 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/workloads-and-pods.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/workloads-and-pods.md @@ -3,6 +3,10 @@ title: "Kubernetes Workloads and Pods" description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" --- + + + + You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. ### Pods diff --git a/versioned_docs/version-2.7/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.7/pages-for-subheaders/about-provisioning-drivers.md index 02ab3b0bc03f..812197b3b3f8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/about-provisioning-drivers.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/about-provisioning-drivers.md @@ -2,6 +2,10 @@ title: Provisioning Drivers --- + + + + Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. ### Rancher Drivers diff --git a/versioned_docs/version-2.7/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.7/pages-for-subheaders/about-rke1-templates.md index 44e73fd794aa..601a622a5814 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/about-rke1-templates.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/about-rke1-templates.md @@ -2,6 +2,10 @@ title: RKE Templates --- + + + + RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.7/pages-for-subheaders/about-the-api.md index 4ac4ae8fcf6a..3b39d7c27172 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/about-the-api.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/about-the-api.md @@ -2,6 +2,10 @@ title: API --- + + + + ## How to use the API The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md index 2dd82ffcb8a7..04edb86702c0 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md @@ -2,6 +2,10 @@ title: Cluster Access --- + + + + This section is about what tools can be used to access clusters managed by Rancher. 
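One concrete example of the tooling this section covers: after downloading a cluster's kubeconfig from the Rancher UI, you can point kubectl at it directly (the file path below is a placeholder):

```bash
export KUBECONFIG=~/Downloads/my-cluster.yaml
kubectl get nodes
kubectl get pods --all-namespaces
```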
For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/advanced-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/advanced-configuration.md index 208abcf1ea87..87efa2a0f9e0 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/advanced-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/advanced-configuration.md @@ -2,6 +2,10 @@ title: Advanced Configuration --- + + + + ### Alertmanager For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/advanced-user-guides.md index e4b8c16e1e4f..b5b3d2410372 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/advanced-user-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/advanced-user-guides.md @@ -2,6 +2,10 @@ title: Advanced User Guides --- + + + + Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working. It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.7/pages-for-subheaders/air-gapped-helm-cli-install.md index 058b697d93d6..d6fbc09698fd 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/air-gapped-helm-cli-install.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -2,6 +2,10 @@ title: Air-Gapped Helm CLI Install --- + + + + This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.7/pages-for-subheaders/authentication-config.md index ac2375cb697a..9bb89f46ee88 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/authentication-config.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/authentication-config.md @@ -3,6 +3,10 @@ title: Authentication Config weight: 10 --- + + + + One of the key features that Rancher adds to Kubernetes is centralized user authentication.
This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md index b32d8068a916..2df94806af94 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -2,6 +2,10 @@ title: Authentication, Permissions and Global Configuration --- + + + + After installation, the [system administrator](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure authentication, authorization, security, default settings, security policies, drivers, and global DNS entries. ## First Log In diff --git a/versioned_docs/version-2.7/pages-for-subheaders/aws-cloud-marketplace.md b/versioned_docs/version-2.7/pages-for-subheaders/aws-cloud-marketplace.md index 0b4c81853429..c487e1d6f928 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/aws-cloud-marketplace.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/aws-cloud-marketplace.md @@ -2,6 +2,10 @@ title: AWS Marketplace Integration --- + + + + ## Overview Rancher offers an integration with the AWS Marketplace which allows users to purchase a support contract with SUSE. This integration allows you to easily adjust your support needs as you start to support more clusters. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-and-disaster-recovery.md index ccb69108f66c..19feeb770d0f 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-and-disaster-recovery.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -3,6 +3,10 @@ title: Backups and Disaster Recovery keywords: [rancher backup restore, rancher backup and restore, backup restore rancher, rancher backup and restore rancher] --- + + + + In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. The `rancher-backup` operator is used to back up and restore Rancher on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps** page, or by using the Helm CLI.
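As a hedged sketch of the Helm CLI route mentioned above — the repository URL, chart names, and namespace below reflect how the backup charts are usually published, but verify them with `helm search repo` before installing:

```bash
helm repo add rancher-charts https://charts.rancher.io
helm repo update
helm install rancher-backup-crd rancher-charts/rancher-backup-crd \
  --namespace cattle-resources-system --create-namespace
helm install rancher-backup rancher-charts/rancher-backup \
  --namespace cattle-resources-system
```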
The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-configuration.md index c6574b936007..104584f741d5 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/backup-restore-configuration.md @@ -2,6 +2,10 @@ title: Rancher Backup Configuration Reference --- + + + + - [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) - [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) - [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.7/pages-for-subheaders/best-practices.md index 81f14325f765..7009f6cce709 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/best-practices.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/best-practices.md @@ -2,6 +2,10 @@ title: Best Practices Guide --- + + + + The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md index 64b0c8a37cfb..f5816af3c48b 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -2,6 +2,10 @@ title: Checklist for Production-Ready Clusters --- + + + + In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. 
For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/cis-scan-guides.md index 6fef05e5b7cc..e76d47504e64 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/cis-scan-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/cis-scan-guides.md @@ -2,6 +2,10 @@ title: CIS Scan Guides --- + + + + - [Install rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md) - [Uninstall rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md) - [Run a Scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.7/pages-for-subheaders/cis-scans.md index ea59178857b5..d9c5dbecabb8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/cis-scans.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/cis-scans.md @@ -2,6 +2,10 @@ title: CIS Scans --- + + + + Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The CIS scans can run on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.7/pages-for-subheaders/cli-with-rancher.md index d4c33d7f390f..547d4c50308f 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/cli-with-rancher.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/cli-with-rancher.md @@ -2,4 +2,8 @@ title: CLI with Rancher --- + + + + Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md). \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/cloud-marketplace.md b/versioned_docs/version-2.7/pages-for-subheaders/cloud-marketplace.md index 41d499ef9dae..6f90a2c3ff42 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/cloud-marketplace.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/cloud-marketplace.md @@ -2,6 +2,10 @@ title: Cloud Marketplace Integration --- + + + + Rancher offers integration with cloud marketplaces to easily purchase support for installations hosted on certain cloud providers. In addition, this integration also provides the ability to generate a supportconfig bundle which can be provided to rancher support. This integration only supports AWS. 
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/cluster-configuration.md index 50f96d112586..60e02a8cd40b 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/cluster-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/cluster-configuration.md @@ -2,6 +2,10 @@ title: Cluster Configuration --- + + + + After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. For information on editing cluster membership, go to [this page.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/configuration-options.md b/versioned_docs/version-2.7/pages-for-subheaders/configuration-options.md index 15a0599ab000..92a375e948db 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/configuration-options.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/configuration-options.md @@ -2,6 +2,10 @@ title: Configuration Options --- + + + + ### Egress Support By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). diff --git a/versioned_docs/version-2.7/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.7/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index 73c2c651cd6a..8662bf782fb0 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -2,6 +2,10 @@ title: Configuring Microsoft Active Directory Federation Service (SAML) --- + + + + If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. ## Prerequisites diff --git a/versioned_docs/version-2.7/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.7/pages-for-subheaders/configure-openldap.md index be2aa86df697..9eb5fc7db2ac 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/configure-openldap.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/configure-openldap.md @@ -2,6 +2,10 @@ title: Configuring OpenLDAP --- + + + + If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. 
## Prerequisites diff --git a/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md index 8952d8abafa9..cdb775108ca6 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,10 @@ title: "Kubernetes Persistent Storage: Volumes and Storage Classes" description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" --- + + + + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/custom-resource-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/custom-resource-configuration.md index cf43951e5b36..37c2f337d78c 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/custom-resource-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/custom-resource-configuration.md @@ -2,6 +2,10 @@ title: Custom Resource Configuration --- + + + + The following Custom Resource Definitions are used to configure logging: - [Flow and ClusterFlow](../integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md index 3215a4f43b4d..8089f7a914dc 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,10 @@ --- title: Deploying Applications across Clusters --- + + + + ### Fleet Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-manager.md index c91b6f37aa39..74e282f0832c 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-manager.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-manager.md @@ -2,6 +2,10 @@ title: Deploying Rancher Server --- + + + + Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. 
- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-workloads.md index 3e86165a071c..b2898cd513b7 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-workloads.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/deploy-rancher-workloads.md @@ -2,6 +2,10 @@ title: Deploying Workloads --- + + + + These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/downstream-cluster-configuration.md index e9065f1fb088..b9fbad0b9665 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/downstream-cluster-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/downstream-cluster-configuration.md @@ -2,4 +2,8 @@ title: Downstream Cluster Configuration --- + + + + The following docs will discuss [node template configuration](./node-template-configuration.md) and [machine configuration](./machine-configuration.md). \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md index 0494ed5f0fa8..c2fca17aa2f8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,10 @@ --- title: Enabling Experimental Features --- + + + + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweigh the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/gke-cluster-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/gke-cluster-configuration.md index 0d7e0804c479..de9a1638a9c5 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/gke-cluster-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/gke-cluster-configuration.md @@ -2,6 +2,10 @@ title: GKE Cluster Configuration Reference --- + + + + ## Changes in Rancher v2.6 - Support for additional configuration options: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.7/pages-for-subheaders/helm-charts-in-rancher.md index c196f1999626..f724ce6f0cfd 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/helm-charts-in-rancher.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/helm-charts-in-rancher.md @@ -2,6 +2,10 @@ title: Helm Charts in Rancher --- + + + + In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. Helm chart repositories are managed using **Apps**.
It uses a catalog-like system to import bundles of charts from repositories and then uses those charts to either deploy custom Helm applications or Rancher's tools such as Monitoring or Istio. Rancher tools come as pre-loaded repositories which deploy as standalone Helm charts. Any additional repositories are only added to the current cluster. ### Versioning Scheme diff --git a/versioned_docs/version-2.7/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.7/pages-for-subheaders/horizontal-pod-autoscaler.md index 64d553553154..d18ce147d060 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/horizontal-pod-autoscaler.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -3,6 +3,10 @@ title: The Horizontal Pod Autoscaler description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment --- + + + + The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.7/pages-for-subheaders/infrastructure-setup.md index f7b834992aae..fabdc72e9751 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/infrastructure-setup.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/infrastructure-setup.md @@ -2,6 +2,10 @@ title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. --- + + + + To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.7/pages-for-subheaders/install-cluster-autoscaler.md index 740348651316..8b749aae5ee0 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/install-cluster-autoscaler.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/install-cluster-autoscaler.md @@ -2,6 +2,10 @@ title: Cluster Autoscaler --- + + + + In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.7/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index 5708b68b23c0..4eda613d25f2 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -3,6 +3,10 @@ title: Install/Upgrade Rancher on a Kubernetes Cluster description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. 
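To make the Helm CLI flow mentioned above concrete, here is a minimal, illustrative sketch of the typical commands; the hostname and bootstrap password are placeholders, and prerequisites such as installing cert-manager (or supplying your own TLS certificates) are omitted:

```bash
# Add the Helm repository that serves the Rancher chart (the "latest" channel is one option among several)
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update

# Rancher is conventionally installed into the cattle-system namespace
kubectl create namespace cattle-system

# Install Rancher; rancher.example.com and the bootstrap password are placeholder values
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set bootstrapPassword=admin
```

You can then watch the rollout with `kubectl -n cattle-system rollout status deploy/rancher` before visiting the hostname you configured.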
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.7/pages-for-subheaders/installation-and-upgrade.md index 032c76e24d52..3077b14edc28 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/installation-and-upgrade.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/installation-and-upgrade.md @@ -3,6 +3,10 @@ title: Installing/Upgrading Rancher description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation --- + + + + This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. ## Terminology diff --git a/versioned_docs/version-2.7/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.7/pages-for-subheaders/installation-references.md index e2048279af00..6108728b04fa 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/installation-references.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/installation-references.md @@ -2,4 +2,8 @@ title: Installation References --- + + + + Please see the following reference guides for other installation resources: [Rancher Helm chart options](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md), [TLS settings](../getting-started/installation-and-upgrade/installation-references/tls-settings.md), and [feature flags](../getting-started/installation-and-upgrade/installation-references/feature-flags.md). \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.7/pages-for-subheaders/installation-requirements.md index 8431f164d4b3..b7214336b133 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/installation-requirements.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/installation-requirements.md @@ -3,6 +3,10 @@ title: Installation Requirements description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup --- + + + + This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. :::note Important: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/istio-setup-guide.md index 2c6ba0c41189..24475f7ffea7 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/istio-setup-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/istio-setup-guide.md @@ -2,6 +2,10 @@ title: Setup Guide --- + + + + This section describes how to enable Istio and start using it in your projects. If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/istio.md b/versioned_docs/version-2.7/pages-for-subheaders/istio.md index 8b00fd018cb9..93f1e9b7cc50 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/istio.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/istio.md @@ -2,6 +2,10 @@ title: Istio --- + + + + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md index fc356dfda175..1fdcce7964bf 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md @@ -2,6 +2,10 @@ title: K3s Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden a K3s cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Information Security (CIS) Kubernetes benchmark controls. :::note diff --git a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-cluster-setup.md b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-cluster-setup.md index 8f9a88e0da3d..9291d55bee45 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-cluster-setup.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-cluster-setup.md @@ -2,6 +2,10 @@ title: "Don't have a Kubernetes cluster? Try one of these tutorials." --- + + + + This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. Rancher can run on any Kubernetes cluster. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index b7f2f0a3bb0b..899d4bb59375 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -3,6 +3,10 @@ title: Setting up Kubernetes Clusters in Rancher description: Provisioning Kubernetes Clusters --- + + + + Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. 
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-components.md index 5e482f2a1e85..f048b5ba19bf 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-components.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-components.md @@ -2,6 +2,10 @@ title: Kubernetes Components --- + + + + The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. This section includes troubleshooting tips in the following categories: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-resources-setup.md index ec485c8f80e9..865f5ae5c462 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-resources-setup.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/kubernetes-resources-setup.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + You can view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. ## Workloads diff --git a/versioned_docs/version-2.7/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.7/pages-for-subheaders/launch-kubernetes-with-rancher.md index fba8d3aeb668..5b7f4363bab6 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/launch-kubernetes-with-rancher.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -2,6 +2,10 @@ title: Launching Kubernetes with Rancher --- + + + + You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, you can choose between [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) or [RKE2](https://docs.rke2.io) distributions. Rancher can launch Kubernetes on any computers, including: - Bare-metal servers diff --git a/versioned_docs/version-2.7/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.7/pages-for-subheaders/load-balancer-and-ingress-controller.md index ea5d34fed342..41bdf40a3234 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/load-balancer-and-ingress-controller.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -3,6 +3,10 @@ title: Set Up Load Balancer and Ingress Controller within Rancher description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers --- + + + + Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. ## Load Balancers diff --git a/versioned_docs/version-2.7/pages-for-subheaders/logging.md b/versioned_docs/version-2.7/pages-for-subheaders/logging.md index a7698cbf32cc..427422627f31 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/logging.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/logging.md @@ -3,6 +3,10 @@ title: Rancher Integration with Logging Services description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. --- + + + + The [Logging operator](https://kube-logging.github.io/docs/) now powers Rancher's logging solution in place of the former, in-house solution. 
## Enabling Logging diff --git a/versioned_docs/version-2.7/pages-for-subheaders/machine-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/machine-configuration.md index 18ec97e5f193..e1b9bb72f0a4 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/machine-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/machine-configuration.md @@ -2,4 +2,8 @@ title: Machine Configuration --- + + + + Machine configuration is the arrangement of resources assigned to a virtual machine. Please see the docs for [Amazon EC2](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md), [DigitalOcean](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md), and [Azure](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md) to learn more. \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/manage-clusters.md index c3f8c1fd6ff3..e6f698853388 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/manage-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/manage-clusters.md @@ -2,6 +2,10 @@ title: Cluster Administration --- + + + + After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. :::note diff --git a/versioned_docs/version-2.7/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.7/pages-for-subheaders/manage-project-resource-quotas.md index f6023c0f6089..fa6f10ec84c8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/manage-project-resource-quotas.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/manage-project-resource-quotas.md @@ -2,6 +2,10 @@ title: Project Resource Quotas --- + + + + In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. This page is a how-to guide for creating resource quotas in existing projects. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.7/pages-for-subheaders/manage-projects.md index 2d40492ba3ca..be308c7e3423 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/manage-projects.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/manage-projects.md @@ -2,6 +2,10 @@ title: Project Administration --- + + + + _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. 
In terms of hierarchy: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.7/pages-for-subheaders/manage-role-based-access-control-rbac.md index b2674b753b9e..c6c87eff5fc9 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/manage-role-based-access-control-rbac.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -2,6 +2,10 @@ title: Role-Based Access Control (RBAC) --- + + + + Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](authentication-config.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page change. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-alerting-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-alerting-guides.md index 720151ea38c7..97e3e801b263 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-alerting-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-alerting-guides.md @@ -2,6 +2,10 @@ title: Monitoring Guides --- + + + + - [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) - [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) - [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md index a398b725ede8..773796fc0959 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md @@ -3,6 +3,10 @@ title: Monitoring and Alerting description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring --- + + + + Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration-guides.md index de3a78168233..bd0de341f465 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -2,6 +2,10 @@ title: Configuration --- + + + + This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration.
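As a rough, hedged illustration of what a custom scrape target can look like with the Prometheus Operator, the following sketch registers a ServiceMonitor; the app label, namespace, and `metrics` port name are placeholders rather than values taken from the docs above:

```bash
# Create a ServiceMonitor so Prometheus scrapes a Service whose port is named "metrics"
kubectl apply -f - <<EOF
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app
  endpoints:
    - port: metrics
      interval: 30s
EOF
```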
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration.md index abf493b4487a..79f97d9513d8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-v2-configuration.md @@ -2,6 +2,10 @@ title: Monitoring V2 Configuration --- + + + + The following sections will explain important options essential to configuring Monitoring V2 in Rancher: - [Receiver Configuration](../reference-guides/monitoring-v2-configuration/receivers.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/new-user-guides.md index e2e667395c3b..e2e68ff0089a 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/new-user-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/new-user-guides.md @@ -2,6 +2,10 @@ title: New User Guides --- + + + + New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users to do work to then get the promised results immediately. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/node-template-configuration.md index 0fff7eb9835b..e6c22d5e8526 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/node-template-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/node-template-configuration.md @@ -2,4 +2,8 @@ title: Node Template Configuration --- + + + + To learn about node template config, refer to [EC2 Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md), [DigitalOcean Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md), [Azure Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md), [vSphere Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md), and [Nutanix Node Template Configuration](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md). diff --git a/versioned_docs/version-2.7/pages-for-subheaders/nutanix.md b/versioned_docs/version-2.7/pages-for-subheaders/nutanix.md index 706ecbae214b..b626cf2bd12b 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/nutanix.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/nutanix.md @@ -3,6 +3,10 @@ title: Creating a Nutanix AOS Cluster description: Use Rancher to create a Nutanix AOS (AHV) cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. 
--- + + + + [Nutanix Acropolis Operating System](https://www.nutanix.com/products/acropolis) (Nutanix AOS) is an operating system for the Nutanix hyper-converged infrastructure platform. AOS comes with a built-in hypervisor called [Acropolis Hypervisor](https://www.nutanix.com/products/ahv), or AHV. By using Rancher with Nutanix AOS (AHV), you can bring cloud operations on-premises. Rancher can provision nodes in AOS (AHV) and install Kubernetes on them. When creating a Kubernetes cluster in AOS, Rancher first provisions the specified number of virtual machines by communicating with the Prism Central API. Then it installs Kubernetes on top of the VMs. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.7/pages-for-subheaders/other-installation-methods.md index effd2424c191..7cd497a8d489 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/other-installation-methods.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/other-installation-methods.md @@ -2,6 +2,10 @@ title: Other Installation Methods --- + + + + ### Air Gapped Installations Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator-guides.md index 0f82f71e1ada..2d1c0ae82246 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator-guides.md @@ -2,6 +2,10 @@ title: Prometheus Federator Guides --- + + + + - [Enable Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md) - [Uninstall Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md) - [Customize Grafana Dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator.md b/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator.md index 918d0b0abea9..efef5f5abae8 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/prometheus-federator.md @@ -2,6 +2,10 @@ title: Prometheus Federator --- + + + + Prometheus Federator, also referred to as Project Monitoring v2, deploys a Helm Project Operator (based on the [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts each containing a Project Monitoring Stack, where each stack contains: - [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.7/pages-for-subheaders/provisioning-storage-examples.md index 7dcd3960ae51..44fd9593fba5 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/provisioning-storage-examples.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/provisioning-storage-examples.md @@ -2,6 +2,10 @@ title: Provisioning Storage Examples --- + + + + Rancher supports persistent storage with a variety of 
volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/quick-start-guides.md index c0010d438507..8a7f8028dfbc 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/quick-start-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,10 @@ --- title: Rancher Deployment Quick Start Guides --- + + + + :::caution The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-behind-an-http-proxy.md index 59a7eec2dba7..fd8a41b8e086 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-behind-an-http-proxy.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -2,6 +2,10 @@ title: Installing Rancher behind an HTTP Proxy --- + + + + In many enterprise environments, servers or VMs running on-premises do not have direct Internet access, but must connect to external services through an HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..0648de359b50 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md @@ -2,6 +2,10 @@ title: Self-Assessment and Hardening Guides for Rancher --- + + + + Rancher provides specific security hardening guides for each supported Rancher version's Kubernetes distributions. ## Rancher Kubernetes Distributions diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-managed-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-managed-clusters.md index 2fc25c09150c..2cdb03fd9094 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-managed-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-managed-clusters.md @@ -2,6 +2,10 @@ title: Best Practices for Rancher Managed Clusters --- + + + + ### Logging Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging.
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-manager-architecture.md index 03ec604e3f66..d7e76f285739 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-manager-architecture.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-manager-architecture.md @@ -2,6 +2,10 @@ title: Architecture --- + + + + This section focuses on the [Rancher server and its components](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) and how [Rancher communicates with downstream Kubernetes clusters](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md). For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index 339dfc856a0a..da5b39209cfe 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-on-a-single-node-with-docker.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -3,6 +3,10 @@ title: Installing Rancher on a Single Node Using Docker description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. --- + + + + Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-security.md index 4ad6516f91ad..5a30b09c77be 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-security.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-security.md @@ -2,6 +2,10 @@ title: Security --- + + + +
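For the single-node Docker installation described above, a minimal sketch of the usual `docker run` invocation looks roughly like the following; `--privileged` is expected by recent Rancher versions, and in practice you would pin a specific image tag rather than `latest`:

```bash
# Run the Rancher server as a single container, publishing HTTP and HTTPS ports
# (pin a tag such as rancher/rancher:v2.7.6 in practice; :latest is only for illustration)
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest
```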
diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-server-configuration.md index 0892fb94db35..5e18f69e7406 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-server-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-server-configuration.md @@ -2,6 +2,10 @@ title: Rancher Server Configuration --- + + + + - [RKE1 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [RKE2 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) - [K3s Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-server.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-server.md index c8984d9c447b..45c3917cd588 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-server.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-server.md @@ -2,6 +2,10 @@ title: Best Practices for the Rancher Server --- + + + + This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. ### Recommended Architecture and Infrastructure diff --git a/versioned_docs/version-2.7/pages-for-subheaders/resources.md b/versioned_docs/version-2.7/pages-for-subheaders/resources.md index 57f6ee1e7b62..52e61353441b 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/resources.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/resources.md @@ -2,6 +2,10 @@ title: Resources --- + + + + ### Docker Installations The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md index 6a187df32738..5c5fad27a162 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md @@ -2,6 +2,10 @@ title: RKE Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden an RKE cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Information Security (CIS) Kubernetes benchmark controls. :::note diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md index 962462c7f3fd..5f3c8c7697f4 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md @@ -2,6 +2,10 @@ title: RKE2 Hardening Guide --- + + + + This document provides prescriptive guidance for how to harden an RKE2 cluster intended for production, before provisioning it with Rancher. It outlines the configurations and controls required for Center for Information Security (CIS) Kubernetes benchmark controls. 
:::note diff --git a/versioned_docs/version-2.7/pages-for-subheaders/selinux-rpm.md b/versioned_docs/version-2.7/pages-for-subheaders/selinux-rpm.md index 0b06703cf32d..c72c72ff56a7 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/selinux-rpm.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/selinux-rpm.md @@ -2,6 +2,10 @@ title: SELinux RPM --- + + + + [Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. Developed by Red Hat, it is an implementation of mandatory access controls (MAC) on Linux. Mandatory access controls allow an administrator of a system to define how applications and users can access different resources such as files, devices, networks and inter-process communication. SELinux also enhances security by making an OS restrictive by default. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.7/pages-for-subheaders/set-up-cloud-providers.md index 4bf3356c2671..9a02515dba97 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/set-up-cloud-providers.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/set-up-cloud-providers.md @@ -2,6 +2,10 @@ title: Setting up Cloud Providers --- + + + + A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.7/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md index 001533601639..fd4f4ed5bda7 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -2,6 +2,10 @@ title: Setting up Clusters from Hosted Kubernetes Providers --- + + + + In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.7/pages-for-subheaders/single-node-rancher-in-docker.md index 61cd166a90c2..91072d2b3b44 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/single-node-rancher-in-docker.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/single-node-rancher-in-docker.md @@ -2,4 +2,8 @@ title: Single Node Rancher in Docker --- + + + + The following docs will discuss [HTTP proxy configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) and [advanced options](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for Docker installs. 
\ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.7/pages-for-subheaders/use-existing-nodes.md index d0c301bdc32e..2aeb05bc488e 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/use-existing-nodes.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/use-existing-nodes.md @@ -3,6 +3,10 @@ title: Launching Kubernetes on Existing Custom Nodes description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements --- + + + + When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. To use this option, you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which include some hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.7/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index fb11e6f46c7c..d875e2325c2f 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on New Nodes in an Infrastructure Provider --- + + + + When you create an RKE or RKE2 cluster using a node template in Rancher, each resulting node pool is shown in a new **Machine Pools** tab. You can see the machine pools by doing the following: 1. Click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/use-windows-clusters.md index d60a24435b17..36fe47c422f9 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/use-windows-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/use-windows-clusters.md @@ -2,6 +2,10 @@ title: Launching Kubernetes on Windows Clusters --- + + + + When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.7/pages-for-subheaders/user-settings.md index db2376f3df89..a9ed1c72d923 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/user-settings.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/user-settings.md @@ -2,6 +2,10 @@ title: User Settings --- + + + + Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu.
You can open this menu by clicking your avatar, located within the main menu. ![User Settings Menu](/img/user-settings.png) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md index 8d5cac9d7b89..70eea8bfd52c 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md @@ -2,6 +2,10 @@ title: Creating a vSphere Cluster description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. --- + + + + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.7/pages-for-subheaders/workloads-and-pods.md index 94ac0881d6d6..5cfe84668af6 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/workloads-and-pods.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/workloads-and-pods.md @@ -3,6 +3,10 @@ title: "Kubernetes Workloads and Pods" description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" --- + + + + You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. ### Pods From 23cbb2f1649098c984d0f0490771593983cad579 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 15 Aug 2023 17:26:32 -0400 Subject: [PATCH 02/54] missing versioned_docs --- .../pages-for-subheaders/about-authentication.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/advanced-options.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/cluster-alerts.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/cluster-logging.md | 4 ++++ .../pages-for-subheaders/cluster-monitoring.md | 4 ++++ .../configure-microsoft-ad-federation-service-saml.md | 1 + .../pages-for-subheaders/configure-shibboleth-saml.md | 4 ++++ .../pages-for-subheaders/creating-a-vsphere-cluster.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/introduction.md | 4 ++++ .../pages-for-subheaders/manage-persistent-storage.md | 4 ++++ .../pages-for-subheaders/other-cloud-providers.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/rke-add-on.md | 4 ++++ .../version-2.0-2.4/pages-for-subheaders/vsphere.md | 1 + .../version-2.5/pages-for-subheaders/advanced-options.md | 4 ++++ .../pages-for-subheaders/configure-shibboleth-saml.md | 4 ++++ .../pages-for-subheaders/integrations-in-rancher.md | 4 ++++ .../version-2.5/pages-for-subheaders/introduction.md | 4 ++++ .../pages-for-subheaders/manage-persistent-storage.md | 4 ++++ .../version-2.5/pages-for-subheaders/other-cloud-providers.md | 4 ++++ .../pages-for-subheaders/vsphere-cloud-provider.md | 4 ++++ versioned_docs/version-2.5/pages-for-subheaders/vsphere.md | 1 + 21 files changed, 75 insertions(+) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md index b46c20667203..af64be43c238 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md @@ -2,6 +2,10 @@ 
title: Authentication --- + + + + One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md index 4e9027dee15f..b42aace9c018 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md @@ -2,4 +2,8 @@ title: Advanced --- + + + + The documents in this section contain resources for less common use cases. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md index 413d8309f584..b2ddd246d1a9 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md @@ -2,6 +2,10 @@ title: Cluster Alerts --- + + + + To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md index aeeb8be555ca..3adc17c283be 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md @@ -3,6 +3,10 @@ title: Cluster Logging description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. --- + + + + Logging is helpful because it allows you to: - Capture and analyze the state of your cluster diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md index cbb8e81bdc0f..ab843e71912b 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md @@ -3,6 +3,10 @@ title: Integrating Rancher and Prometheus for Cluster Monitoring description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring --- + + + + _Available as of v2.2.0_ Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. 
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index a93e37aff4c8..bea46290e78d 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -5,6 +5,7 @@ title: Configuring Microsoft Active Directory Federation Service (SAML) + _Available as of v2.0.7_ If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md index 075d978b53c5..e6f855161b4a 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md @@ -2,6 +2,10 @@ title: Configuring Shibboleth (SAML) --- + + + + _Available as of v2.4.0_ If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md index caba41ff0655..68461c8f2e29 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md @@ -2,6 +2,10 @@ title: VSphere Node Template Configuration --- + + + + The vSphere node templates in Rancher were updated in the following Rancher versions. Refer to the newest configuration reference that is less than or equal to your Rancher version: - [v2.3.3](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md index a3dd45625385..f9a706ffc4fa 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md @@ -2,4 +2,8 @@ title: Introduction --- + + + + The [overview](../getting-started/introduction/overview.md) will discuss Rancher's features, capabilities, and how it makes running Kubernetes easy. The guide to the [new Rancher docs structure, Divio,](../getting-started/introduction/what-are-divio-docs.md) will explain more about the updated look and function of our docs. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md index aa4dfd5b0267..8368cbc8a441 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md @@ -2,6 +2,10 @@ title: Manage Persistent Storage --- + + + + The following sections will explain how to manage persistent storage: - [How Persistent Storage Works](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md index f9c33021ef9d..320bdcea7af3 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md @@ -2,6 +2,10 @@ title: Other Cloud Providers --- + + + + The following sections will outline how to set up the following cloud providers: - [Amazon Cloud Provider](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md index f7e03d805653..a1686ad8a358 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md @@ -2,5 +2,9 @@ title: RKE Add-On Install --- + + + + - [Kubernetes Install with External Load Balancer (TCP/Layer 4)](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md) - [Kubernetes Install with External Load Balancer (HTTPS/Layer 7)](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md index e1ba4150bb78..630921bb38c6 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md @@ -6,6 +6,7 @@ description: Use Rancher to create a vSphere cluster. It may consist of groups o + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md index 4e9027dee15f..b42aace9c018 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md @@ -2,4 +2,8 @@ title: Advanced --- + + + + The documents in this section contain resources for less common use cases. 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md index f756fa548a8b..39e80cf21495 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md @@ -2,6 +2,10 @@ title: Configuring Shibboleth (SAML) --- + + + + If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md index 6ae53ea156f7..07a2c6d823bf 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md @@ -2,6 +2,10 @@ title: Integrations in Rancher --- + + + + Over time, Rancher has accrued several products and projects that have been integrated into the Rancher UI. Examples of some of these integrations are [Continuous Delivery with Fleet](../pages-for-subheaders/fleet-gitops-at-scale.md) and [Monitoring and Alerting](../pages-for-subheaders/monitoring-and-alerting.md). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/introduction.md b/versioned_docs/version-2.5/pages-for-subheaders/introduction.md index a3dd45625385..f9a706ffc4fa 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/introduction.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/introduction.md @@ -2,4 +2,8 @@ title: Introduction --- + + + + The [overview](../getting-started/introduction/overview.md) will discuss Rancher's features, capabilities, and how it makes running Kubernetes easy. The guide to the [new Rancher docs structure, Divio,](../getting-started/introduction/what-are-divio-docs.md) will explain more about the updated look and function of our docs. 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md index 38f9b23fe1cd..e07bcda996d9 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md @@ -2,6 +2,10 @@ title: Manage Persistent Storage --- + + + + The following sections will explain how to manage persistent storage: - [How Persistent Storage Works](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md index b9d8965a7620..57388b5a7efd 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md @@ -2,6 +2,10 @@ title: Other Cloud Providers --- + + + + The following sections will outline how to set up the following cloud providers: - [Amazon Cloud Provider](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md b/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md index 3e22f1c36603..7dd0259ffe68 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md @@ -2,6 +2,10 @@ title: Setting up the vSphere Cloud Provider --- + + + + In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. ## In-tree Cloud Provider diff --git a/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md index 301815e1743b..580dafd3ab5c 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md @@ -6,6 +6,7 @@ description: Use Rancher to create a vSphere cluster. It may consist of groups o + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. From d8a21f823c0111f57665826aba401a1a280fc788 Mon Sep 17 00:00:00 2001 From: Billy Tat Date: Thu, 17 Aug 2023 10:49:57 -0700 Subject: [PATCH 03/54] Add v2.7.6 entry to versions table --- src/pages/versions.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/pages/versions.md b/src/pages/versions.md index 18b2b8402b04..e30f1483b848 100644 --- a/src/pages/versions.md +++ b/src/pages/versions.md @@ -10,10 +10,10 @@ Below are the documentation and release notes for the currently released version - + - - + +
v2.7.5v2.7.6 DocumentationRelease NotesSupport MatrixRelease NotesSupport Matrix
@@ -33,6 +33,12 @@ Below are the documentation and release notes for the currently released version Below are the documentation and release notes for previous versions of Rancher 2.7.x: + + + + + + From aed5f8fbbebc1f97faa8018249353b66feb9eddb Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 18 Aug 2023 09:51:09 -0400 Subject: [PATCH 04/54] #787 - Update reference to outdated UI label (#792) --- docs/reference-guides/cli-with-rancher/rancher-cli.md | 2 +- .../reference-guides/cli-with-rancher/rancher-cli.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference-guides/cli-with-rancher/rancher-cli.md b/docs/reference-guides/cli-with-rancher/rancher-cli.md index b5c276f4c12d..938f1de72064 100644 --- a/docs/reference-guides/cli-with-rancher/rancher-cli.md +++ b/docs/reference-guides/cli-with-rancher/rancher-cli.md @@ -10,7 +10,7 @@ The Rancher CLI (Command Line Interface) is a unified tool that you can use to i The binary can be downloaded directly from the UI. 1. In the upper left corner, click **☰**. -1. At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. +1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. ### Requirements diff --git a/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md index b5c276f4c12d..938f1de72064 100644 --- a/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md +++ b/versioned_docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md @@ -10,7 +10,7 @@ The Rancher CLI (Command Line Interface) is a unified tool that you can use to i The binary can be downloaded directly from the UI. 1. In the upper left corner, click **☰**. -1. At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. +1. At the bottom of the navigation sidebar menu, click **About**. 1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. 
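For orientation on what the downloaded binary is used for: the usual first step is to authenticate the CLI against your Rancher server, as sketched below. The server URL and bearer token are placeholder assumptions; substitute your own values.

```bash
# Log in to the Rancher server with an API bearer token created in the Rancher UI.
rancher login https://rancher.example.com --token token-xxxxx:yyyyyyyyyyyyyyyy

# Choose which cluster and project subsequent commands should target.
rancher context switch

# Quick sanity check against the selected cluster.
rancher kubectl get nodes
```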
### Requirements From 0b9a2bc9917517e180146fb1db20a7c2c60d68e8 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Tue, 22 Aug 2023 16:56:10 +0200 Subject: [PATCH 05/54] Update rke-hardening-guide based on CIS-1.24 and CIS-1.7 by removing --protect-kernel-defaults and updating notes --- docs/pages-for-subheaders/rke1-hardening-guide.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/pages-for-subheaders/rke1-hardening-guide.md b/docs/pages-for-subheaders/rke1-hardening-guide.md index 6a187df32738..d842af828594 100644 --- a/docs/pages-for-subheaders/rke1-hardening-guide.md +++ b/docs/pages-for-subheaders/rke1-hardening-guide.md @@ -12,10 +12,11 @@ This hardening guide is intended to be used for RKE clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.24 up to v1.25 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. Due to that, the CIS checks 5.2.3, 5.2.4, 5.2.5 and 5.2.6 might fail. +- Since Benchmark v1.24, check id `4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)` might fail, as /etc/kubernetes/ssl/kube-ca.pem is provisioned in 644 by default. +- Since Benchmark v1.7 (latest), `--protect-kernel-defaults` (check id 4.2.6) parameter is not required anymore, and was replaced. ::: For more details on how to evaluate a hardened RKE cluster against the official CIS benchmark, refer to the RKE self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -243,7 +244,6 @@ services: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" generate_serving_certificate: true addons: | apiVersion: networking.k8s.io/v1 @@ -293,7 +293,6 @@ services: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: true generate_serving_certificate: true addons: | # Upstream Kubernetes restricted PSP policy From a09eb326a7bf71c30df6db69f5931e713fcd4d24 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Tue, 22 Aug 2023 17:47:23 +0200 Subject: [PATCH 06/54] Update k3s-hardening-guide based on CIS-1.24 and CIS-1.7 by removing --protect-kernel-defaults and updating notes --- .../k3s-hardening-guide.md | 21 +++---------------- 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index fc356dfda175..c3b8eb51ef11 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -12,10 +12,11 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.24 up to v1.25 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. 
+- Since Benchmark v1.24, some check ids might fail due to file permission new requirements (600 instead of 644). Impacted check ids: `1.1.1`, `1.1.3`, `1.1.5`, `1.1.7`, `1.1.13`, `1.1.15`, `4.1.7`, `4.1.9`, `4.1.15`. + - Since Benchmark v1.7 (latest), `--protect-kernel-defaults` (check id 4.2.6) parameter is not required anymore, and was replaced. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -31,20 +32,6 @@ The first section (1.1) of the CIS Benchmark primarily focuses on pod manifest ## Host-level Requirements -### Ensure `protect-kernel-defaults` is set - -This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. - -The `protect-kernel-defaults` flag can be set in the cluster configuration in Rancher. - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - protect-kernel-defaults: true -``` - ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: @@ -685,7 +672,6 @@ spec: - config: kubelet-arg: - make-iptables-util-chains=true # CIS 4.2.7 - protect-kernel-defaults: true # CIS 4.2.6 ``` @@ -717,7 +703,6 @@ spec: - config: kubelet-arg: - make-iptables-util-chains=true # CIS 4.2.7 - protect-kernel-defaults: true # CIS 4.2.6 ``` From aca79758eeaf682358f49b0eab6b0c77e00ad509 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 22 Aug 2023 12:13:10 -0400 Subject: [PATCH 07/54] Update hardening guide links with broken redirects (#798) While testing a build, I noticed that the self-assment guide redirects weren't resolving from pages-for-subheaders/rancher-hardening-guides.md. I updated the links on that page to point to the current name for those pages. When I tested the links in docusaurus.config, it seemed like the redirects were formatted correctly and pointed to the right paths. I know we had a similar issue some months ago with other redirects. Updating the links on pages-for-subheaders/rancher-hardening-guides.md is a quick fix -- we still need to look into why the redirects weren't resolving. --- docs/pages-for-subheaders/rancher-hardening-guides.md | 6 +++--- .../pages-for-subheaders/rancher-hardening-guides.md | 6 +++--- .../pages-for-subheaders/rancher-hardening-guides.md | 6 +++--- .../pages-for-subheaders/rancher-hardening-guides.md | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/pages-for-subheaders/rancher-hardening-guides.md b/docs/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..fcbbf594054c 100644 --- a/docs/pages-for-subheaders/rancher-hardening-guides.md +++ b/docs/pages-for-subheaders/rancher-hardening-guides.md @@ -24,7 +24,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |--------------------|-----------------------|-----------------------|------------------| | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke1-hardening-guide.md) | | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke1-hardening-guide.md) | -| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | +| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | ### RKE2 Guides @@ -32,7 +32,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned RKE2 | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke2-hardening-guide.md) | | Rancher provisioned RKE2 | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke2-hardening-guide.md) | -| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | +| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | | Standalone RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](https://docs.rke2.io/security/cis_self_assessment123) | [Link](https://docs.rke2.io/security/hardening_guide) | ### K3s Guides @@ -41,7 +41,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned K3s cluster | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](k3s-hardening-guide.md) | | Rancher provisioned K3s cluster | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](k3s-hardening-guide.md) | -| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | +| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | | Standalone K3s | Kubernetes v1.22 up to v1.24 | CIS v1.23 | [Link](https://docs.k3s.io/security/self-assessment) | [Link](https://docs.k3s.io/security/hardening-guide) | ## Rancher with SELinux diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-hardening-guides.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..fcbbf594054c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-hardening-guides.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-hardening-guides.md @@ -24,7 +24,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |--------------------|-----------------------|-----------------------|------------------| | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke1-hardening-guide.md) | | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke1-hardening-guide.md) | -| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | +| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | ### RKE2 Guides @@ -32,7 +32,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned RKE2 | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke2-hardening-guide.md) | | Rancher provisioned RKE2 | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke2-hardening-guide.md) | -| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | +| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | | Standalone RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](https://docs.rke2.io/security/cis_self_assessment123) | [Link](https://docs.rke2.io/security/hardening_guide) | ### K3s Guides @@ -41,7 +41,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned K3s cluster | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](k3s-hardening-guide.md) | | Rancher provisioned K3s cluster | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](k3s-hardening-guide.md) | -| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | +| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | | Standalone K3s | Kubernetes v1.22 up to v1.24 | CIS v1.23 | [Link](https://docs.k3s.io/security/self-assessment) | [Link](https://docs.k3s.io/security/hardening-guide) | ## Rancher with SELinux diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..fcbbf594054c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md @@ -24,7 +24,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |--------------------|-----------------------|-----------------------|------------------| | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke1-hardening-guide.md) | | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke1-hardening-guide.md) | -| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | +| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | ### RKE2 Guides @@ -32,7 +32,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned RKE2 | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke2-hardening-guide.md) | | Rancher provisioned RKE2 | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke2-hardening-guide.md) | -| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | +| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | | Standalone RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](https://docs.rke2.io/security/cis_self_assessment123) | [Link](https://docs.rke2.io/security/hardening_guide) | ### K3s Guides @@ -41,7 +41,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned K3s cluster | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](k3s-hardening-guide.md) | | Rancher provisioned K3s cluster | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](k3s-hardening-guide.md) | -| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | +| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | | Standalone K3s | Kubernetes v1.22 up to v1.24 | CIS v1.23 | [Link](https://docs.k3s.io/security/self-assessment) | [Link](https://docs.k3s.io/security/hardening-guide) | ## Rancher with SELinux diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md b/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md index 48386cdd8712..fcbbf594054c 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md @@ -24,7 +24,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |--------------------|-----------------------|-----------------------|------------------| | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke1-hardening-guide.md) | | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke1-hardening-guide.md) | -| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | +| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | ### RKE2 Guides @@ -32,7 +32,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned RKE2 | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke2-hardening-guide.md) | | Rancher provisioned RKE2 | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke2-hardening-guide.md) | -| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | +| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | | Standalone RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](https://docs.rke2.io/security/cis_self_assessment123) | [Link](https://docs.rke2.io/security/hardening_guide) | ### K3s Guides @@ -41,7 +41,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned K3s cluster | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](k3s-hardening-guide.md) | | Rancher provisioned K3s cluster | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](k3s-hardening-guide.md) | -| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | +| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | | Standalone K3s | Kubernetes v1.22 up to v1.24 | CIS v1.23 | [Link](https://docs.k3s.io/security/self-assessment) | [Link](https://docs.k3s.io/security/hardening-guide) | ## Rancher with SELinux From 95b8aa9530a0c91de97612ab0b89ff2c8c2fa475 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 22 Aug 2023 15:59:23 -0400 Subject: [PATCH 08/54] canonicized monitoring-v2-configuration-guides (#799) --- .../advanced-configuration/alertmanager.md | 4 ++++ .../advanced-configuration/prometheus.md | 4 ++++ .../advanced-configuration/prometheusrules.md | 4 ++++ .../advanced-configuration/alertmanager.md | 4 ++++ .../advanced-configuration/prometheus.md | 4 ++++ .../advanced-configuration/prometheusrules.md | 4 ++++ .../advanced-configuration/alertmanager.md | 4 ++++ .../advanced-configuration/prometheus.md | 4 ++++ .../advanced-configuration/prometheusrules.md | 4 ++++ .../advanced-configuration/alertmanager.md | 4 ++++ .../advanced-configuration/prometheus.md | 4 ++++ .../advanced-configuration/prometheusrules.md | 4 ++++ 12 files changed, 48 insertions(+) diff --git 
a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md index 2c2f5e14454b..bc213ee0637f 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -2,6 +2,10 @@ title: Alertmanager Configuration --- + + + + It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md index 7a818f180c36..122316d018df 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -2,6 +2,10 @@ title: Prometheus Configuration --- + + + + It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. :::note diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md index daecc9853efd..b4259adf475f 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -2,6 +2,10 @@ title: Configuring PrometheusRules --- + + + + A PrometheusRule defines a group of Prometheus alerting and/or recording rules. :::note diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md index 820452b14eec..20d1375f4178 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -2,6 +2,10 @@ title: Alertmanager Configuration --- + + + + It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. 
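As a rough sketch of the kind of Receiver and Route content described above, the configuration that ultimately lands in the Alertmanager resource generally looks like the following. The receiver name, matching label, channel, and webhook URL are illustrative assumptions, not values taken from the Rancher chart.

```yaml
# Illustrative Alertmanager routing tree with a single Slack receiver.
route:
  group_by: ["cluster", "alertname"]
  receiver: slack-ops               # default receiver
  routes:
    - match:
        severity: critical          # critical alerts also go to this receiver
      receiver: slack-ops
receivers:
  - name: slack-ops
    slack_configs:
      - channel: "#ops-alerts"
        api_url: https://hooks.slack.com/services/placeholder   # hypothetical webhook URL
```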
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md index 2e2272a5e512..89ad0b38556f 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -2,6 +2,10 @@ title: Prometheus Configuration --- + + + + It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. > This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md index 4e4110466aca..a1bc247fe32c 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -2,6 +2,10 @@ title: Configuring PrometheusRules --- + + + + A PrometheusRule defines a group of Prometheus alerting and/or recording rules. > This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md index 2c2f5e14454b..bc213ee0637f 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -2,6 +2,10 @@ title: Alertmanager Configuration --- + + + + It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md index 7a818f180c36..122316d018df 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -2,6 +2,10 @@ title: Prometheus Configuration --- + + + + It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. :::note diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md index daecc9853efd..b4259adf475f 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -2,6 +2,10 @@ title: Configuring PrometheusRules --- + + + + A PrometheusRule defines a group of Prometheus alerting and/or recording rules. :::note diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md index 2c2f5e14454b..bc213ee0637f 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -2,6 +2,10 @@ title: Alertmanager Configuration --- + + + + It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md index 7a818f180c36..122316d018df 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -2,6 +2,10 @@ title: Prometheus Configuration --- + + + + It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. 
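To make the ServiceMonitor mechanism mentioned above concrete, a minimal sketch follows; the workload name, namespace, and port name are assumptions for illustration only.

```yaml
# Minimal ServiceMonitor; the operator merges it into the Prometheus scrape configuration.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app            # hypothetical workload
  namespace: example-namespace # hypothetical namespace
spec:
  selector:
    matchLabels:
      app: example-app         # must match the labels on the workload's Service
  endpoints:
    - port: metrics            # named Service port that serves /metrics
      interval: 30s
```

Declaring scrape targets this way is why direct edits to the Prometheus resource are rarely needed.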
:::note diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md index daecc9853efd..b4259adf475f 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -2,6 +2,10 @@ title: Configuring PrometheusRules --- + + + + A PrometheusRule defines a group of Prometheus alerting and/or recording rules. :::note From d9bf7f41a5140eafa1fbd98057832ccfc2794e98 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 22 Aug 2023 16:55:09 -0400 Subject: [PATCH 09/54] canonicized prometheus-federator-guides (#800) --- .../customize-grafana-dashboards.md | 4 ++++ .../enable-prometheus-federator.md | 4 ++++ .../prometheus-federator-guides/project-monitors.md | 4 ++++ .../prometheus-federator-guides/set-up-workloads.md | 4 ++++ .../uninstall-prometheus-federator.md | 4 ++++ .../customize-grafana-dashboards.md | 4 ++++ .../enable-prometheus-federator.md | 4 ++++ .../prometheus-federator-guides/project-monitors.md | 4 ++++ .../prometheus-federator-guides/set-up-workloads.md | 4 ++++ .../uninstall-prometheus-federator.md | 4 ++++ .../customize-grafana-dashboards.md | 4 ++++ .../enable-prometheus-federator.md | 4 ++++ .../prometheus-federator-guides/project-monitors.md | 4 ++++ .../prometheus-federator-guides/set-up-workloads.md | 4 ++++ .../uninstall-prometheus-federator.md | 4 ++++ 15 files changed, 60 insertions(+) diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md index 8763447dfa40..50a45a162169 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + Grafana dashboards are customized the same way whether it's for rancher-monitoring or for Prometheus Federator. For instructions, refer to [this page](../customize-grafana-dashboard.md). 
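The PrometheusRule pages touched above describe a resource that simply groups alerting and recording rules. A minimal sketch, with a hypothetical rule name, expression, and threshold:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules                    # hypothetical name
spec:
  groups:
    - name: example.rules
      rules:
        - alert: ExampleHighErrorRate    # alerting rule
          expr: sum(rate(http_requests_total{code=~"5.."}[5m])) > 1
          for: 10m
          labels:
            severity: warning
          annotations:
            summary: HTTP 5xx rate is unusually high
        - record: job:http_requests_total:rate5m   # recording rule
          expr: sum(rate(http_requests_total[5m])) by (job)
```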
\ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index 00ad8ebc5bd3..b8decaf2e181 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -2,6 +2,10 @@ title: Enable Prometheus Federator --- + + + + ## Requirements By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md index dc6a1fdb710a..8efe48f4ae80 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md @@ -2,6 +2,10 @@ title: Installing Project Monitors --- + + + + Install **Project Monitors** in each project where you want to enable project monitoring. 1. Click **☰ > Cluster Management**. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md index 238223463935..d816f9b9e686 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md @@ -2,6 +2,10 @@ title: Setting up Prometheus Federator for a Workload --- + + + + ### Display CPU and Memory Metrics for a Workload Displaying CPU and memory metrics with Prometheus Federator is done the same way as with rancher-monitoring. For instructions, refer [here](../set-up-monitoring-for-workloads.md#display-cpu-and-memory-metrics-for-a-workload). diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md index b770f584ade0..d5703cf33aae 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md @@ -2,6 +2,10 @@ title: Uninstall Prometheus Federator --- + + + + 1. Click **☰ > Cluster Management**. 1. Go to the cluster that you created and click **Explore**. 1. In the left navigation bar, click **Apps**. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md index 8763447dfa40..50a45a162169 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + Grafana dashboards are customized the same way whether it's for rancher-monitoring or for Prometheus Federator. For instructions, refer to [this page](../customize-grafana-dashboard.md). \ No newline at end of file diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index 00ad8ebc5bd3..b8decaf2e181 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -2,6 +2,10 @@ title: Enable Prometheus Federator --- + + + + ## Requirements By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md index dc6a1fdb710a..8efe48f4ae80 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md @@ -2,6 +2,10 @@ title: Installing Project Monitors --- + + + + Install **Project Monitors** in each project where you want to enable project monitoring. 1. Click **☰ > Cluster Management**. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md index 238223463935..d816f9b9e686 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md @@ -2,6 +2,10 @@ title: Setting up Prometheus Federator for a Workload --- + + + + ### Display CPU and Memory Metrics for a Workload Displaying CPU and memory metrics with Prometheus Federator is done the same way as with rancher-monitoring. For instructions, refer [here](../set-up-monitoring-for-workloads.md#display-cpu-and-memory-metrics-for-a-workload). diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md index 57ed1fdee2e8..971eeba41cf2 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md @@ -2,6 +2,10 @@ title: Uninstall Prometheus Federator --- + + + + diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md index 8763447dfa40..50a45a162169 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + Grafana dashboards are customized the same way whether it's for rancher-monitoring or for Prometheus Federator. For instructions, refer to [this page](../customize-grafana-dashboard.md). 
\ No newline at end of file diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index 00ad8ebc5bd3..b8decaf2e181 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -2,6 +2,10 @@ title: Enable Prometheus Federator --- + + + + ## Requirements By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md index dc6a1fdb710a..8efe48f4ae80 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md @@ -2,6 +2,10 @@ title: Installing Project Monitors --- + + + + Install **Project Monitors** in each project where you want to enable project monitoring. 1. Click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md index 238223463935..d816f9b9e686 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md @@ -2,6 +2,10 @@ title: Setting up Prometheus Federator for a Workload --- + + + + ### Display CPU and Memory Metrics for a Workload Displaying CPU and memory metrics with Prometheus Federator is done the same way as with rancher-monitoring. For instructions, refer [here](../set-up-monitoring-for-workloads.md#display-cpu-and-memory-metrics-for-a-workload). 
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md index b770f584ade0..d5703cf33aae 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md @@ -2,6 +2,10 @@ title: Uninstall Prometheus Federator --- + + + + 1. Click **☰ > Cluster Management**. 1. Go to the cluster that you created and click **Explore**. 1. In the left navigation bar, click **Apps**. From 411825bbdc12b4c86222ad81c481e0dfe268c4e7 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 22 Aug 2023 16:55:33 -0400 Subject: [PATCH 10/54] #755 Update CIS Benchmark note (#791) * #755 Update CIS Benchmark note * Update docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md Co-authored-by: Billy Tat * syncing version-2.7 --------- Co-authored-by: Billy Tat --- .../cis-scan-guides/install-rancher-cis-benchmark.md | 2 +- .../cis-scan-guides/install-rancher-cis-benchmark.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 43abd15703fa..00adb621ed51 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -12,6 +12,6 @@ title: Install Rancher CIS Benchmark :::note -CIS Benchmark 4.0.0 and above have PSPs disabled by default. To install CIS Benchmark on a hardened cluster, set `global.psp.enabled` to `true` in the values before installing the chart. +If you are running Kubernetes v1.24 or earlier, and have a [Pod Security Policy](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) (PSP) hardened cluster, CIS Benchmark 4.0.0 and later disable PSPs by default. To install CIS Benchmark on a PSP-hardened cluster, set `global.psp.enabled` to `true` in the values before installing the chart. [Pod Security Admission](../../new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md) (PSA) hardened clusters aren't affected. ::: diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 43abd15703fa..00adb621ed51 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -12,6 +12,6 @@ title: Install Rancher CIS Benchmark :::note -CIS Benchmark 4.0.0 and above have PSPs disabled by default. To install CIS Benchmark on a hardened cluster, set `global.psp.enabled` to `true` in the values before installing the chart. 
+If you are running Kubernetes v1.24 or earlier, and have a [Pod Security Policy](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) (PSP) hardened cluster, CIS Benchmark 4.0.0 and later disable PSPs by default. To install CIS Benchmark on a PSP-hardened cluster, set `global.psp.enabled` to `true` in the values before installing the chart. [Pod Security Admission](../../new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md) (PSA) hardened clusters aren't affected. ::: From 7df449fad6df6ce7df26316188e60e27ee42aadc Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 22 Aug 2023 17:55:01 -0400 Subject: [PATCH 11/54] updated canonical link in integrations-for-rancher --- .../pages-for-subheaders/integrations-in-rancher.md | 4 ++++ .../pages-for-subheaders/integrations-in-rancher.md | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md index 95f8d796ceee..f42739ba7da4 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md @@ -2,6 +2,10 @@ title: Integrations in Rancher --- + + + + Over time, Rancher has accrued several products and projects that have been integrated into the Rancher UI. Examples of some of these integrations are [Istio](../pages-for-subheaders/istio.md) and [CIS Scans](../pages-for-subheaders/cis-scans.md). \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md index 07a2c6d823bf..03942d5743a2 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md @@ -3,7 +3,7 @@ title: Integrations in Rancher --- - + Over time, Rancher has accrued several products and projects that have been integrated into the Rancher UI. 
From b2972d427c4fc53db8f435219f5e703092940e3b Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 22 Aug 2023 17:56:21 -0400 Subject: [PATCH 12/54] removed canonical link from rke-add-ons so older page can stand alone --- .../version-2.0-2.4/pages-for-subheaders/rke-add-on.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md index a1686ad8a358..f7e03d805653 100644 --- a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md @@ -2,9 +2,5 @@ title: RKE Add-On Install --- - - - - - [Kubernetes Install with External Load Balancer (TCP/Layer 4)](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md) - [Kubernetes Install with External Load Balancer (HTTPS/Layer 7)](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md) From d08532ac1824729c831051ebf88a3f31561a68e4 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 23 Aug 2023 15:12:40 -0400 Subject: [PATCH 13/54] 801 - correcting no-space between head tag and next line in file (#803) --- .../kubernetes-and-docker-registries.md | 1 + .../layer-4-and-layer-7-load-balancing.md | 1 + .../pages-for-subheaders/create-kubernetes-persistent-storage.md | 1 + docs/pages-for-subheaders/enable-experimental-features.md | 1 + docs/pages-for-subheaders/vsphere.md | 1 + .../layer-4-and-layer-7-load-balancing.md | 1 + .../kubernetes-and-docker-registries.md | 1 + .../layer-4-and-layer-7-load-balancing.md | 1 + .../pages-for-subheaders/create-kubernetes-persistent-storage.md | 1 + .../pages-for-subheaders/enable-experimental-features.md | 1 + versioned_docs/version-2.6/pages-for-subheaders/vsphere.md | 1 + .../kubernetes-and-docker-registries.md | 1 + .../layer-4-and-layer-7-load-balancing.md | 1 + .../pages-for-subheaders/create-kubernetes-persistent-storage.md | 1 + .../pages-for-subheaders/enable-experimental-features.md | 1 + versioned_docs/version-2.7/pages-for-subheaders/vsphere.md | 1 + 16 files changed, 16 insertions(+) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index b75e633f88b0..fb19bea8379b 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -6,6 +6,7 @@ description: Learn about the container image registry and Kubernetes registry, t + Registries are Kubernetes secrets containing credentials used to authenticate with [private container registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). 
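Because a registry credential is just a Kubernetes secret of type `docker-registry`, a minimal sketch of creating one with `kubectl` looks like the following; the secret name, server, credentials, and namespace are placeholders:

```bash
# Create a docker-registry secret that workloads can reference as an imagePullSecret.
kubectl create secret docker-registry my-registry-creds \
  --docker-server=registry.example.com \
  --docker-username=myuser \
  --docker-password='mypassword' \
  --namespace=mynamespace
```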
The word "registry" can mean two things, depending on whether it is used to refer to a container or Kubernetes registry: diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 2b530640e3ef..e0d25c169e69 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -6,6 +6,7 @@ description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balan + Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. ## Layer-4 Load Balancer diff --git a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md index cdb775108ca6..6bda26af36e7 100644 --- a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -6,6 +6,7 @@ description: "Learn about the two ways with which you can create persistent stor + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/docs/pages-for-subheaders/enable-experimental-features.md b/docs/pages-for-subheaders/enable-experimental-features.md index c2fca17aa2f8..40c722001f17 100644 --- a/docs/pages-for-subheaders/enable-experimental-features.md +++ b/docs/pages-for-subheaders/enable-experimental-features.md @@ -5,6 +5,7 @@ title: Enabling Experimental Features + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/docs/pages-for-subheaders/vsphere.md b/docs/pages-for-subheaders/vsphere.md index 70eea8bfd52c..634a037c1cd9 100644 --- a/docs/pages-for-subheaders/vsphere.md +++ b/docs/pages-for-subheaders/vsphere.md @@ -6,6 +6,7 @@ description: Use Rancher to create a vSphere cluster. It may consist of groups o + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. 
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index c66b751f10d4..43949a4bd5c4 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -6,6 +6,7 @@ description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balan + Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. ## Layer-4 Load Balancer diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index 8001e912860b..bcea60925b93 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -6,6 +6,7 @@ description: Learn about the Docker registry and Kubernetes registry, their use + Registries are Kubernetes secrets containing credentials used to authenticate with [private Docker registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). The word "registry" can mean two things, depending on whether it is used to refer to a Docker or Kubernetes registry: diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 2b530640e3ef..e0d25c169e69 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -6,6 +6,7 @@ description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balan + Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. ## Layer-4 Load Balancer diff --git a/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md index cdb775108ca6..6bda26af36e7 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -6,6 +6,7 @@ description: "Learn about the two ways with which you can create persistent stor + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. 
This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md index 5626965df48d..4e9cd6a601a1 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md @@ -5,6 +5,7 @@ title: Enabling Experimental Features + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md index 70eea8bfd52c..634a037c1cd9 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/vsphere.md @@ -6,6 +6,7 @@ description: Use Rancher to create a vSphere cluster. It may consist of groups o + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index b75e633f88b0..fb19bea8379b 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -6,6 +6,7 @@ description: Learn about the container image registry and Kubernetes registry, t + Registries are Kubernetes secrets containing credentials used to authenticate with [private container registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). 
The word "registry" can mean two things, depending on whether it is used to refer to a container or Kubernetes registry: diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 2b530640e3ef..e0d25c169e69 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -6,6 +6,7 @@ description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balan + Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. ## Layer-4 Load Balancer diff --git a/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md index cdb775108ca6..6bda26af36e7 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -6,6 +6,7 @@ description: "Learn about the two ways with which you can create persistent stor + When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md index c2fca17aa2f8..40c722001f17 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md @@ -5,6 +5,7 @@ title: Enabling Experimental Features + Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md index 70eea8bfd52c..634a037c1cd9 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/vsphere.md @@ -6,6 +6,7 @@ description: Use Rancher to create a vSphere cluster. 
It may consist of groups o + import YouTube from '@site/src/components/YouTube' By using Rancher with vSphere, you can bring cloud operations on-premises. From 8c850bb5a50b2b4e34eb585c5c7172c8717b2d63 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 23 Aug 2023 17:18:18 -0400 Subject: [PATCH 14/54] 795 - updated contribute-to-rancher page (#805) --- docs/contribute-to-rancher.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/contribute-to-rancher.md b/docs/contribute-to-rancher.md index 2d5e97ab3e66..45874694b861 100644 --- a/docs/contribute-to-rancher.md +++ b/docs/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: Contributing to Rancher --- -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. +Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: @@ -14,7 +14,15 @@ For more detailed information on how to contribute to the development of Rancher On the Rancher Users Slack, the channel for developers is **#developer**. -## Repositories +## Rancher Docs + +If you have suggestions for the documentation on this website, [open](https://github.com/rancher/rancher-docs/issues/new/choose) an issue in the main [Rancher docs](https://github.com/rancher/rancher-docs) repository. This repo contains documentation for Rancher v2.0 and later. + +See the [Rancher docs README](https://github.com/rancher/rancher-docs#readme) for more details on contributing to and building the Rancher v2.x docs repo. + +For documentation describing Rancher v1.6 and earlier, see the [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) repo, which contains source files for https://rancher.com/docs/rancher/v1.6/en/. + +## Rancher Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -38,19 +46,19 @@ To see all libraries/projects used in Rancher, see the [`go.mod` file](https://g ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
Rancher components used for provisioning/managing Kubernetes clusters. -## Building +### Building Rancher Repositories Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -## Bugs, Issues or Questions +### Rancher Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). -### Checklist for Filing Issues +#### Checklist for Filing Issues Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. @@ -126,11 +134,3 @@ Please remove any sensitive data as it will be publicly viewable. - Docker daemon logging (these might not all exist, depending on operating system) - `/var/log/docker.log` - **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -## Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
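For the **Metrics** item in the checklist above, a quick snapshot of process, memory, and disk usage can be attached to an issue; the exact flags and output file below are only one reasonable choice:

```bash
# One-shot view of the busiest processes, memory, and disk usage on the affected node.
top -b -n 1 | head -n 30 > node-diagnostics.txt
free -m >> node-diagnostics.txt
df -h >> node-diagnostics.txt
```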
From cc1b56c892ec753a4e7df6ea013dd105362fff92 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 23 Aug 2023 17:18:43 -0400 Subject: [PATCH 15/54] canonicized monitoring-alerting-guides (#804) --- .../create-persistent-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/customize-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/debug-high-memory-usage.md | 4 ++++ .../monitoring-alerting-guides/enable-monitoring.md | 4 ++++ .../set-up-monitoring-for-workloads.md | 4 ++++ .../monitoring-alerting-guides/uninstall-monitoring.md | 4 ++++ .../create-persistent-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/customize-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/debug-high-memory-usage.md | 4 ++++ .../monitoring-alerting-guides/enable-monitoring.md | 4 ++++ .../set-up-monitoring-for-workloads.md | 4 ++++ .../monitoring-alerting-guides/uninstall-monitoring.md | 4 ++++ .../create-persistent-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/customize-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/debug-high-memory-usage.md | 4 ++++ .../monitoring-alerting-guides/enable-monitoring.md | 4 ++++ .../set-up-monitoring-for-workloads.md | 4 ++++ .../monitoring-alerting-guides/uninstall-monitoring.md | 4 ++++ .../create-persistent-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/customize-grafana-dashboard.md | 4 ++++ .../monitoring-alerting-guides/debug-high-memory-usage.md | 4 ++++ .../monitoring-alerting-guides/enable-monitoring.md | 4 ++++ .../set-up-monitoring-for-workloads.md | 4 ++++ .../monitoring-alerting-guides/uninstall-monitoring.md | 4 ++++ 24 files changed, 96 insertions(+) diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md index 791247ff0344..fe129e27bb4f 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Persistent Grafana Dashboards --- + + + + To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md index 2309f9d694be..c3b315f3e5bb 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. 
### Prerequisites diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md index d411a0d92fcb..020b06924d68 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md @@ -2,6 +2,10 @@ title: Debugging High Memory Usage --- + + + + Every time series in Prometheus is uniquely identified by its [metric name](https://prometheus.io/docs/practices/naming/#metric-names) and optional key-value pairs called [labels.](https://prometheus.io/docs/practices/naming/#labels) The labels allow the ability to filter and aggregate the time series data, but they also multiply the amount of data that Prometheus collects. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md index 878ce4071768..2e4ab117ca0d 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -2,6 +2,10 @@ title: Enable Monitoring --- + + + + As an [administrator](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index 0b32bfdf2d7c..b9a655dd913f 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -2,6 +2,10 @@ title: Setting up Monitoring for a Workload --- + + + + If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The steps for setting up monitoring for workloads depend on whether you want basic metrics such as CPU and memory for the workload, or whether you want to scrape custom metrics from the workload. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md index 6f5fad2f12c5..688d54a0506b 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md @@ -2,6 +2,10 @@ title: Uninstall Monitoring --- + + + + 1. Click **☰ > Cluster Management**. 1. Go to the cluster that you created and click **Explore**. 1. In the left navigation bar, click **Apps**. 
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md index bef6e9921950..03165a2ff04c 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Persistent Grafana Dashboards --- + + + + To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md index 0c81d89b9e37..8a116920252d 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. ### Prerequisites diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md index d411a0d92fcb..020b06924d68 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md @@ -2,6 +2,10 @@ title: Debugging High Memory Usage --- + + + + Every time series in Prometheus is uniquely identified by its [metric name](https://prometheus.io/docs/practices/naming/#metric-names) and optional key-value pairs called [labels.](https://prometheus.io/docs/practices/naming/#labels) The labels allow the ability to filter and aggregate the time series data, but they also multiply the amount of data that Prometheus collects. 
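To make the label-multiplication point concrete, the Prometheus TSDB status endpoint reports which metric names account for the most series, which is a common first step when chasing high memory usage. The service name and port-forward below are assumptions about a typical rancher-monitoring install:

```bash
# Port-forward the cluster Prometheus locally (service name and namespace are illustrative).
kubectl -n cattle-monitoring-system port-forward svc/rancher-monitoring-prometheus 9090:9090 &

# Head-block statistics, including the metric names with the highest series counts.
curl -s http://localhost:9090/api/v1/status/tsdb
```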
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md index e7d02c373a5b..f77e692a5fe8 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -2,6 +2,10 @@ title: Enable Monitoring --- + + + + As an [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index befcd8e2d046..114e5049ae57 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -2,6 +2,10 @@ title: Setting up Monitoring for a Workload --- + + + + If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The steps for setting up monitoring for workloads depend on whether you want basic metrics such as CPU and memory for the workload, or whether you want to scrape custom metrics from the workload. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md index 72b8fe204794..6e9f3dc0f238 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md @@ -2,6 +2,10 @@ title: Uninstall Monitoring --- + + + + 1. From the **Cluster Explorer,** click Apps & Marketplace. 1. Click **Installed Apps.** 1. Go to the `cattle-monitoring-system` namespace and check the boxes for `rancher-monitoring-crd` and `rancher-monitoring`. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md index 791247ff0344..fe129e27bb4f 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Persistent Grafana Dashboards --- + + + + To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md index 617af0263554..b59e7dd45f34 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. ### Prerequisites diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md index d411a0d92fcb..020b06924d68 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md @@ -2,6 +2,10 @@ title: Debugging High Memory Usage --- + + + + Every time series in Prometheus is uniquely identified by its [metric name](https://prometheus.io/docs/practices/naming/#metric-names) and optional key-value pairs called [labels.](https://prometheus.io/docs/practices/naming/#labels) The labels allow the ability to filter and aggregate the time series data, but they also multiply the amount of data that Prometheus collects. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md index 47a2e72a1f89..4fccf5549169 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -2,6 +2,10 @@ title: Enable Monitoring --- + + + + As an [administrator](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index 0b32bfdf2d7c..b9a655dd913f 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -2,6 +2,10 @@ title: Setting up Monitoring for a Workload --- + + + + If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The steps for setting up monitoring for workloads depend on whether you want basic metrics such as CPU and memory for the workload, or whether you want to scrape custom metrics from the workload. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md index a016e5b3f807..a04f87d8ed1d 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md @@ -2,6 +2,10 @@ title: Uninstall Monitoring --- + + + + diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md index 791247ff0344..fe129e27bb4f 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Persistent Grafana Dashboards --- + + + + To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. 
ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md index 2309f9d694be..c3b315f3e5bb 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -2,6 +2,10 @@ title: Customizing Grafana Dashboards --- + + + + In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. ### Prerequisites diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md index d411a0d92fcb..020b06924d68 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md @@ -2,6 +2,10 @@ title: Debugging High Memory Usage --- + + + + Every time series in Prometheus is uniquely identified by its [metric name](https://prometheus.io/docs/practices/naming/#metric-names) and optional key-value pairs called [labels.](https://prometheus.io/docs/practices/naming/#labels) The labels allow the ability to filter and aggregate the time series data, but they also multiply the amount of data that Prometheus collects. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md index 878ce4071768..2e4ab117ca0d 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -2,6 +2,10 @@ title: Enable Monitoring --- + + + + As an [administrator](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. 
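Monitoring is normally enabled from **Apps** in the Rancher UI, but a Helm sketch of the equivalent installs can make the moving parts clearer. The chart names and namespace match the ones referenced in the uninstall steps elsewhere in these guides (`rancher-monitoring-crd`, `rancher-monitoring`, `cattle-monitoring-system`); the repository alias is an assumption:

```bash
# The CRD chart is expected to be installed before the main monitoring chart.
helm install rancher-monitoring-crd rancher-charts/rancher-monitoring-crd \
  -n cattle-monitoring-system --create-namespace
helm install rancher-monitoring rancher-charts/rancher-monitoring \
  -n cattle-monitoring-system
```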
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index 0b32bfdf2d7c..b9a655dd913f 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -2,6 +2,10 @@ title: Setting up Monitoring for a Workload --- + + + + If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The steps for setting up monitoring for workloads depend on whether you want basic metrics such as CPU and memory for the workload, or whether you want to scrape custom metrics from the workload. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md index 6f5fad2f12c5..688d54a0506b 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md @@ -2,6 +2,10 @@ title: Uninstall Monitoring --- + + + + 1. Click **☰ > Cluster Management**. 1. Go to the cluster that you created and click **Explore**. 1. In the left navigation bar, click **Apps**. From 59ebb1143860dbaa543a1a12651828a69e4e7bc2 Mon Sep 17 00:00:00 2001 From: Billy Tat Date: Wed, 23 Aug 2023 15:42:19 -0700 Subject: [PATCH 16/54] Sync 'Updated contribute-to-rancher' (700f07) to versioned docs --- .../version-2.0-2.4/contribute-to-rancher.md | 77 +++++++++++-------- .../version-2.5/contribute-to-rancher.md | 77 +++++++++++-------- .../version-2.6/contribute-to-rancher.md | 26 +++---- .../version-2.7/contribute-to-rancher.md | 26 +++---- 4 files changed, 120 insertions(+), 86 deletions(-) diff --git a/versioned_docs/version-2.0-2.4/contribute-to-rancher.md b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md index 26132b9e9859..45874694b861 100644 --- a/versioned_docs/version-2.0-2.4/contribute-to-rancher.md +++ b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: Contributing to Rancher --- -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. +Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: @@ -14,7 +14,15 @@ For more detailed information on how to contribute to the development of Rancher On the Rancher Users Slack, the channel for developers is **#developer**. 
-## Repositories +## Rancher Docs + +If you have suggestions for the documentation on this website, [open](https://github.com/rancher/rancher-docs/issues/new/choose) an issue in the main [Rancher docs](https://github.com/rancher/rancher-docs) repository. This repo contains documentation for Rancher v2.0 and later. + +See the [Rancher docs README](https://github.com/rancher/rancher-docs#readme) for more details on contributing to and building the Rancher v2.x docs repo. + +For documentation describing Rancher v1.6 and earlier, see the [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) repo, which contains source files for https://rancher.com/docs/rancher/v1.6/en/. + +## Rancher Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -23,7 +31,7 @@ Repository | URL | Description Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. +User Interface | https://github.com/rancher/dashboard/ | This repository is the source of the Dashboard UI. (Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. @@ -35,27 +43,36 @@ loglevel repository | https://github.com/rancher/loglevel | This repository is t To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. -![Rancher diagram](/img/ranchercomponentsdiagram.svg)
+![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)<br/>
Rancher components used for provisioning/managing Kubernetes clusters. -## Building +### Building Rancher Repositories Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -## Bugs, Issues or Questions +### Rancher Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). -### Checklist for Filing Issues +#### Checklist for Filing Issues Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. +:::note + +For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. + +::: + +:::note Important: + +Please remove any sensitive data as it will be publicly viewable. + +::: - **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce @@ -67,17 +84,21 @@ Please follow this checklist when filing an issue which will helps us investigat - **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. - **Logs:** Provide data/logs from the used resources. 
- - Rancher - - Docker install + - Rancher + - Docker install ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') ``` - - Kubernetes install using `kubectl` + - Kubernetes install using `kubectl` - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ::: ``` kubectl -n cattle-system \ @@ -85,16 +106,20 @@ Please follow this checklist when filing an issue which will helps us investigat -l app=rancher \ --timestamps=true ``` - - Docker install using `docker` on each of the nodes in the RKE cluster + - Docker install using `docker` on each of the nodes in the RKE cluster ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') ``` - - Kubernetes Install with RKE Add-On + - Kubernetes Install with RKE Add-On + + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + ::: ``` kubectl -n cattle-system \ @@ -102,18 +127,10 @@ Please follow this checklist when filing an issue which will helps us investigat --timestamps=true \ -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` + - System logging (these might not all exist, depending on operating system) + - `/var/log/messages` + - `/var/log/syslog` + - `/var/log/kern.log` + - Docker daemon logging (these might not all exist, depending on operating system) + - `/var/log/docker.log` - **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -## Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
diff --git a/versioned_docs/version-2.5/contribute-to-rancher.md b/versioned_docs/version-2.5/contribute-to-rancher.md index 84b51360dba2..45874694b861 100644 --- a/versioned_docs/version-2.5/contribute-to-rancher.md +++ b/versioned_docs/version-2.5/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: Contributing to Rancher --- -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. +Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: @@ -14,7 +14,15 @@ For more detailed information on how to contribute to the development of Rancher On the Rancher Users Slack, the channel for developers is **#developer**. -## Repositories +## Rancher Docs + +If you have suggestions for the documentation on this website, [open](https://github.com/rancher/rancher-docs/issues/new/choose) an issue in the main [Rancher docs](https://github.com/rancher/rancher-docs) repository. This repo contains documentation for Rancher v2.0 and later. + +See the [Rancher docs README](https://github.com/rancher/rancher-docs#readme) for more details on contributing to and building the Rancher v2.x docs repo. + +For documentation describing Rancher v1.6 and earlier, see the [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) repo, which contains source files for https://rancher.com/docs/rancher/v1.6/en/. + +## Rancher Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -23,7 +31,7 @@ Repository | URL | Description Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. +User Interface | https://github.com/rancher/dashboard/ | This repository is the source of the Dashboard UI. (Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. @@ -35,27 +43,36 @@ loglevel repository | https://github.com/rancher/loglevel | This repository is t To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. -![Rancher diagram](/img/ranchercomponentsdiagram.svg)
+![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
Rancher components used for provisioning/managing Kubernetes clusters. -## Building +### Building Rancher Repositories Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -## Bugs, Issues or Questions +### Rancher Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). -### Checklist for Filing Issues +#### Checklist for Filing Issues Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. +:::note + +For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. + +::: + +:::note Important: + +Please remove any sensitive data as it will be publicly viewable. + +::: - **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce @@ -67,17 +84,21 @@ Please follow this checklist when filing an issue which will helps us investigat - **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. - **Logs:** Provide data/logs from the used resources. 
- - Rancher - - Docker install + - Rancher + - Docker install ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') ``` - - Kubernetes install using `kubectl` + - Kubernetes install using `kubectl` - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ::: ``` kubectl -n cattle-system \ @@ -85,16 +106,20 @@ Please follow this checklist when filing an issue which will helps us investigat -l app=rancher \ --timestamps=true ``` - - Docker install using `docker` on each of the nodes in the RKE cluster + - Docker install using `docker` on each of the nodes in the RKE cluster ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') ``` - - Kubernetes Install with RKE Add-On + - Kubernetes Install with RKE Add-On + + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + ::: ``` kubectl -n cattle-system \ @@ -102,18 +127,10 @@ Please follow this checklist when filing an issue which will helps us investigat --timestamps=true \ -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` + - System logging (these might not all exist, depending on operating system) + - `/var/log/messages` + - `/var/log/syslog` + - `/var/log/kern.log` + - Docker daemon logging (these might not all exist, depending on operating system) + - `/var/log/docker.log` - **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -## Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
diff --git a/versioned_docs/version-2.6/contribute-to-rancher.md b/versioned_docs/version-2.6/contribute-to-rancher.md index 2d5e97ab3e66..45874694b861 100644 --- a/versioned_docs/version-2.6/contribute-to-rancher.md +++ b/versioned_docs/version-2.6/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: Contributing to Rancher --- -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. +Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: @@ -14,7 +14,15 @@ For more detailed information on how to contribute to the development of Rancher On the Rancher Users Slack, the channel for developers is **#developer**. -## Repositories +## Rancher Docs + +If you have suggestions for the documentation on this website, [open](https://github.com/rancher/rancher-docs/issues/new/choose) an issue in the main [Rancher docs](https://github.com/rancher/rancher-docs) repository. This repo contains documentation for Rancher v2.0 and later. + +See the [Rancher docs README](https://github.com/rancher/rancher-docs#readme) for more details on contributing to and building the Rancher v2.x docs repo. + +For documentation describing Rancher v1.6 and earlier, see the [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) repo, which contains source files for https://rancher.com/docs/rancher/v1.6/en/. + +## Rancher Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -38,19 +46,19 @@ To see all libraries/projects used in Rancher, see the [`go.mod` file](https://g ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
Rancher components used for provisioning/managing Kubernetes clusters. -## Building +### Building Rancher Repositories Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -## Bugs, Issues or Questions +### Rancher Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). -### Checklist for Filing Issues +#### Checklist for Filing Issues Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. @@ -126,11 +134,3 @@ Please remove any sensitive data as it will be publicly viewable. - Docker daemon logging (these might not all exist, depending on operating system) - `/var/log/docker.log` - **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -## Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. diff --git a/versioned_docs/version-2.7/contribute-to-rancher.md b/versioned_docs/version-2.7/contribute-to-rancher.md index 2d5e97ab3e66..45874694b861 100644 --- a/versioned_docs/version-2.7/contribute-to-rancher.md +++ b/versioned_docs/version-2.7/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: Contributing to Rancher --- -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. +Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). 
The wiki has resources on many topics, including the following: @@ -14,7 +14,15 @@ For more detailed information on how to contribute to the development of Rancher On the Rancher Users Slack, the channel for developers is **#developer**. -## Repositories +## Rancher Docs + +If you have suggestions for the documentation on this website, [open](https://github.com/rancher/rancher-docs/issues/new/choose) an issue in the main [Rancher docs](https://github.com/rancher/rancher-docs) repository. This repo contains documentation for Rancher v2.0 and later. + +See the [Rancher docs README](https://github.com/rancher/rancher-docs#readme) for more details on contributing to and building the Rancher v2.x docs repo. + +For documentation describing Rancher v1.6 and earlier, see the [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) repo, which contains source files for https://rancher.com/docs/rancher/v1.6/en/. + +## Rancher Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -38,19 +46,19 @@ To see all libraries/projects used in Rancher, see the [`go.mod` file](https://g ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
Rancher components used for provisioning/managing Kubernetes clusters. -## Building +### Building Rancher Repositories Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -## Bugs, Issues or Questions +### Rancher Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). -### Checklist for Filing Issues +#### Checklist for Filing Issues Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. @@ -126,11 +134,3 @@ Please remove any sensitive data as it will be publicly viewable. - Docker daemon logging (these might not all exist, depending on operating system) - `/var/log/docker.log` - **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -## Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
From c48f9f4162360f7fe6f60d59cd33fad2a71cee59 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Thu, 24 Aug 2023 10:55:32 +0200 Subject: [PATCH 17/54] Update notes in k3s-hardening-guide.md Co-authored-by: Marty Hernandez Avedon --- docs/pages-for-subheaders/k3s-hardening-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index c3b8eb51ef11..d91eaff90489 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -15,8 +15,8 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.24 up to v1.25 | :::note -- Since Benchmark v1.24, some check ids might fail due to file permission new requirements (600 instead of 644). Impacted check ids: `1.1.1`, `1.1.3`, `1.1.5`, `1.1.7`, `1.1.13`, `1.1.15`, `4.1.7`, `4.1.9`, `4.1.15`. - - Since Benchmark v1.7 (latest), `--protect-kernel-defaults` (check id 4.2.6) parameter is not required anymore, and was replaced. +- In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.15`, `1.1.17` and `4.1.15`. + - In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. From fef03ce751ba2c7a11186a57d8aa41049dd685a8 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Thu, 24 Aug 2023 10:56:01 +0200 Subject: [PATCH 18/54] Update k8s version in k3s-hardening-guide.md --- docs/pages-for-subheaders/k3s-hardening-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index d91eaff90489..17d79026580d 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -12,7 +12,7 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.24 up to v1.25 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 | :::note - In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.15`, `1.1.17` and `4.1.15`. 
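For illustration, a minimal sketch of checking and tightening the file permissions that the newer benchmark versions flag; the path is a placeholder, since the exact file depends on which check id fails:

```
# Placeholder path: substitute the file named by the failing check id.
FLAGGED_FILE=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key

# Show the current mode and ownership of the file.
stat -c '%a %U:%G %n' "$FLAGGED_FILE"

# Tighten to the 600 mode required by Benchmark v1.24 and later.
chmod 600 "$FLAGGED_FILE"
```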
From f7557d8e99d164805bedc037f40bde873db5edad Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 25 Aug 2023 14:07:44 -0400 Subject: [PATCH 19/54] #420 - Canonical links for manage-projects (#810) * canonicized manage-project-resource-quotas * canonicized manage-projects --- .../manage-projects/manage-pod-security-policies.md | 4 ++++ .../about-project-resource-quotas.md | 4 ++++ .../override-default-limit-in-namespaces.md | 4 ++++ .../manage-project-resource-quotas/resource-quota-types.md | 4 ++++ .../set-container-default-resource-limits.md | 4 ++++ .../manage-projects/manage-pod-security-policies.md | 4 ++++ .../about-project-resource-quotas.md | 4 ++++ .../override-default-limit-in-namespaces.md | 4 ++++ .../manage-project-resource-quotas/resource-quota-types.md | 4 ++++ .../set-container-default-resource-limits.md | 4 ++++ .../manage-projects/manage-pod-security-policies.md | 4 ++++ .../about-project-resource-quotas.md | 4 ++++ .../override-default-limit-in-namespaces.md | 4 ++++ .../manage-project-resource-quotas/resource-quota-types.md | 4 ++++ .../set-container-default-resource-limits.md | 4 ++++ .../manage-projects/manage-pod-security-policies.md | 4 ++++ .../about-project-resource-quotas.md | 4 ++++ .../override-default-limit-in-namespaces.md | 4 ++++ .../manage-project-resource-quotas/resource-quota-types.md | 4 ++++ .../set-container-default-resource-limits.md | 4 ++++ .../manage-projects/manage-pod-security-policies.md | 4 ++++ .../about-project-resource-quotas.md | 4 ++++ .../override-default-limit-in-namespaces.md | 4 ++++ .../manage-project-resource-quotas/resource-quota-types.md | 4 ++++ .../set-container-default-resource-limits.md | 4 ++++ 25 files changed, 100 insertions(+) diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index 32f4dffd780f..664ee75b85ee 100644 --- a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -2,6 +2,10 @@ title: Pod Security Policies --- + + + + :::note These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 3c5c50e97df9..be7d1b5a57a7 100644 --- a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -2,6 +2,10 @@ title: How Resource Quotas Work in Rancher Projects --- + + + + Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. 
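For contrast with the project-level behavior described above, here is a rough sketch of the namespaced ResourceQuota object that plain Kubernetes uses, which must be repeated in every namespace; the names and limits are examples only:

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: example-quota     # example name
  namespace: example-ns   # must be created once per namespace in plain Kubernetes
spec:
  hard:
    requests.cpu: "2"
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi
EOF
```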
diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index 626c9ecef384..9f6c8ca56555 100644 --- a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -2,6 +2,10 @@ title: Overriding the Default Limit for a Namespace --- + + + + Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../../new-user-guides/manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md index ea629b510db7..e789f5bf38bb 100644 --- a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -2,6 +2,10 @@ title: Resource Quota Type Reference --- + + + + When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. | Resource Type | Description | diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index dcd0df91382e..dd4e32413194 100644 --- a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -2,6 +2,10 @@ title: Setting Container Default Resource Limits --- + + + + When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
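As a rough sketch of the underlying Kubernetes concept, a namespace-level default container limit corresponds to a LimitRange object; the namespace, name, and values below are examples only:

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: LimitRange
metadata:
  name: container-defaults   # example name
  namespace: example-ns      # example namespace
spec:
  limits:
    - type: Container
      defaultRequest:        # used when a container omits resource requests
        cpu: 100m
        memory: 128Mi
      default:               # used when a container omits resource limits
        cpu: 500m
        memory: 256Mi
EOF
```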
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index fe08d0afe49b..a006269ccbea 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -2,6 +2,10 @@ title: Pod Security Policies --- + + + + > These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 640e612dbc1e..5fc377ddc144 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -2,6 +2,10 @@ title: How Resource Quotas Work in Rancher Projects --- + + + + Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index d0bb519de80a..fc857d11c17f 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -2,6 +2,10 @@ title: Overriding the Default Limit for a Namespace --- + + + + Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. 
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md index 4a67e20a9bd4..8f4416615880 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -2,6 +2,10 @@ title: Resource Quota Type Reference --- + + + + When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. | Resource Type | Description | diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index c1fecad96245..0d1d2779f690 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -2,6 +2,10 @@ title: Setting Container Default Resource Limits --- + + + + _Available as of v2.2.0_ When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index fe08d0afe49b..a006269ccbea 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -2,6 +2,10 @@ title: Pod Security Policies --- + + + + > These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. 
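For reference only, a minimal example of the kind of unprivileged PodSecurityPolicy such an assignment enforces; the name and rules are illustrative, the assignment to a project itself happens in Rancher rather than with `kubectl`, and PodSecurityPolicy was removed in Kubernetes v1.25, so this applies only to the older releases these pages cover:

```
kubectl apply -f - <<EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-restricted        # example name
spec:
  privileged: false               # disallow privileged containers
  allowPrivilegeEscalation: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
EOF
```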
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 640e612dbc1e..5fc377ddc144 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -2,6 +2,10 @@ title: How Resource Quotas Work in Rancher Projects --- + + + + Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index d0bb519de80a..fc857d11c17f 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -2,6 +2,10 @@ title: Overriding the Default Limit for a Namespace --- + + + + Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md index 7c6c072146d6..2562a6e31de2 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -2,6 +2,10 @@ title: Resource Quota Type Reference --- + + + + When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. 
| Resource Type | Description | diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index d7034958bdbe..574f6d806a0c 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -2,6 +2,10 @@ title: Setting Container Default Resource Limits --- + + + + When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index 32f4dffd780f..664ee75b85ee 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -2,6 +2,10 @@ title: Pod Security Policies --- + + + + :::note These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 3c5c50e97df9..be7d1b5a57a7 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -2,6 +2,10 @@ title: How Resource Quotas Work in Rancher Projects --- + + + + Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. 
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index 626c9ecef384..9f6c8ca56555 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -2,6 +2,10 @@ title: Overriding the Default Limit for a Namespace --- + + + + Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../../new-user-guides/manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md index ea629b510db7..e789f5bf38bb 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -2,6 +2,10 @@ title: Resource Quota Type Reference --- + + + + When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. | Resource Type | Description | diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index dcd0df91382e..dd4e32413194 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -2,6 +2,10 @@ title: Setting Container Default Resource Limits --- + + + + When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. 
To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index 32f4dffd780f..664ee75b85ee 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -2,6 +2,10 @@ title: Pod Security Policies --- + + + + :::note These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 3c5c50e97df9..be7d1b5a57a7 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -2,6 +2,10 @@ title: How Resource Quotas Work in Rancher Projects --- + + + + Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index 626c9ecef384..9f6c8ca56555 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -2,6 +2,10 @@ title: Overriding the Default Limit for a Namespace --- + + + + Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../../new-user-guides/manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. 
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md index ea629b510db7..e789f5bf38bb 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -2,6 +2,10 @@ title: Resource Quota Type Reference --- + + + + When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. | Resource Type | Description | diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index dcd0df91382e..dd4e32413194 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -2,6 +2,10 @@ title: Setting Container Default Resource Limits --- + + + + When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
From 1e4d576296390aba82ab9dd2b3c2e2a3b5dad11b Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 25 Aug 2023 14:34:43 -0400 Subject: [PATCH 20/54] canonicized istio-setup-guide (#811) --- .../istio-setup-guide/enable-istio-in-cluster.md | 4 ++++ .../istio-setup-guide/enable-istio-in-namespace.md | 4 ++++ .../istio-setup-guide/generate-and-view-traffic.md | 4 ++++ .../istio-setup-guide/set-up-istio-gateway.md | 4 ++++ .../istio-setup-guide/set-up-traffic-management.md | 4 ++++ .../istio-setup-guide/use-istio-sidecar.md | 4 ++++ .../istio-setup-guide/enable-istio-in-cluster.md | 4 ++++ .../istio-setup-guide/enable-istio-in-namespace.md | 4 ++++ .../istio-setup-guide/generate-and-view-traffic.md | 4 ++++ .../istio-setup-guide/set-up-istio-gateway.md | 4 ++++ .../istio-setup-guide/set-up-traffic-management.md | 4 ++++ .../istio-setup-guide/use-istio-sidecar.md | 4 ++++ .../istio-setup-guide/enable-istio-in-cluster.md | 4 ++++ .../istio-setup-guide/enable-istio-in-namespace.md | 4 ++++ .../istio-setup-guide/generate-and-view-traffic.md | 4 ++++ .../istio-setup-guide/set-up-istio-gateway.md | 4 ++++ .../istio-setup-guide/set-up-traffic-management.md | 4 ++++ .../istio-setup-guide/use-istio-sidecar.md | 4 ++++ .../istio-setup-guide/enable-istio-in-cluster.md | 4 ++++ .../istio-setup-guide/enable-istio-in-namespace.md | 4 ++++ .../istio-setup-guide/generate-and-view-traffic.md | 4 ++++ .../istio-setup-guide/set-up-istio-gateway.md | 4 ++++ .../istio-setup-guide/set-up-traffic-management.md | 4 ++++ .../istio-setup-guide/use-istio-sidecar.md | 4 ++++ .../istio-setup-guide/enable-istio-in-cluster.md | 4 ++++ .../istio-setup-guide/enable-istio-in-namespace.md | 4 ++++ .../istio-setup-guide/generate-and-view-traffic.md | 4 ++++ .../istio-setup-guide/set-up-istio-gateway.md | 4 ++++ .../istio-setup-guide/set-up-traffic-management.md | 4 ++++ .../istio-setup-guide/use-istio-sidecar.md | 4 ++++ 30 files changed, 120 insertions(+) diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index 7b4a05166e11..09624abc1bc0 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -2,6 +2,10 @@ title: 1. Enable Istio in the Cluster --- + + + + :::note Prerequisites: - Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index 2ced1c1591b0..13100f501093 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -2,6 +2,10 @@ title: 2. Enable Istio in a Namespace --- + + + + You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. This namespace setting will only affect new workloads in the namespace. 
Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md index 070dc3a557b6..7203d827091a 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -2,6 +2,10 @@ title: 6. Generate and View Traffic --- + + + + This section describes how to view the traffic that is being managed by Istio. ## The Kiali Traffic Graph diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index bcafaaca525a..38548a4921d5 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -2,6 +2,10 @@ title: 4. Set up the Istio Gateway --- + + + + The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 38e7ff642a30..150289745d7d 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -2,6 +2,10 @@ title: 5. Set up Istio's Components for Traffic Management --- + + + + A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index e885762ba9ed..af5b22672c59 100644 --- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -2,6 +2,10 @@ title: 3. Add Deployments and Services with the Istio Sidecar --- + + + + :::note Prerequisite: To enable Istio for a workload, the cluster and namespace must have the Istio app installed. 
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index 042356d3c03a..d6a00bb9afde 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -2,6 +2,10 @@ title: 1. Enable Istio in the Cluster --- + + + + This cluster uses the default Nginx controller to allow traffic into the cluster. A Rancher [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index 6ff01cc125a5..929f03ca7cea 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -2,6 +2,10 @@ title: 2. Enable Istio in a Namespace --- + + + + You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md index 9eb37070717e..becffde4401f 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -2,6 +2,10 @@ title: 7. Generate and View Traffic --- + + + + This section describes how to view the traffic that is being managed by Istio. ## The Kiali Traffic Graph diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index 3e8996509f1b..f90135bcf223 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -2,6 +2,10 @@ title: 5. Set up the Istio Gateway --- + + + + The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. 
You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 6255a7675766..37c0d47eeab0 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -2,6 +2,10 @@ title: 6. Set up Istio's Components for Traffic Management --- + + + + A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index 9e0de0f856c2..29bf3db97d1e 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -2,6 +2,10 @@ title: 4. Add Deployments and Services with the Istio Sidecar --- + + + + > **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled. Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index d49f98858026..a6c4d1091219 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -2,6 +2,10 @@ title: 1. Enable Istio in the Cluster --- + + + + >**Prerequisites:** > >- Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. 
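To make the gateway discussion above more concrete, the sketch below defines a minimal Istio `Gateway` bound to the default `istio-ingressgateway` pods so that Istio can receive traffic from outside the cluster. It uses the upstream Istio API; the hostname and namespace are placeholders rather than values from these pages.

```bash
kubectl apply -f - <<'EOF'
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: example-gateway      # placeholder name
  namespace: demo            # placeholder namespace
spec:
  selector:
    istio: ingressgateway    # selects the default Istio ingress gateway pods
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "example.com"      # placeholder hostname
EOF
```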
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index c7cd51836f9a..d0d02e1e1265 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -2,6 +2,10 @@ title: 2. Enable Istio in a Namespace --- + + + + You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md index 5dca79353e04..849197b482d3 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -2,6 +2,10 @@ title: 6. Generate and View Traffic --- + + + + This section describes how to view the traffic that is being managed by Istio. ## The Kiali Traffic Graph diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index 1de19a102713..3f0ede71d7f1 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -2,6 +2,10 @@ title: 4. Set up the Istio Gateway --- + + + + The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 387da5da6970..e1e57f5983ce 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -2,6 +2,10 @@ title: 5. Set up Istio's Components for Traffic Management --- + + + + A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. 
The two key resources in Istio traffic management are *virtual services* and *destination rules*. - [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index ebc410ce9e46..0996a8f6c6a0 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -2,6 +2,10 @@ title: 3. Add Deployments and Services with the Istio Sidecar --- + + + + > **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have the Istio app installed. Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index 5958a56b8b3c..291aa93f1238 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -2,6 +2,10 @@ title: 1. Enable Istio in the Cluster --- + + + + :::note Prerequisites: - Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index 2ced1c1591b0..13100f501093 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -2,6 +2,10 @@ title: 2. Enable Istio in a Namespace --- + + + + You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. 
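A hedged sketch of the virtual service and destination rule pattern described in the traffic-management pages above: split traffic between two subsets of the same Kubernetes service, for example during a canary rollout. The service name, namespace, subsets, and weights are illustrative only.

```bash
kubectl apply -f - <<'EOF'
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews              # placeholder name
  namespace: demo            # placeholder namespace
spec:
  hosts:
    - reviews                # the Kubernetes service whose traffic is intercepted
  http:
    - route:
        - destination:
            host: reviews
            subset: v1
          weight: 90         # 90% of requests go to subset v1
        - destination:
            host: reviews
            subset: v2
          weight: 10         # 10% of requests go to the canary subset v2
---
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: reviews
  namespace: demo
spec:
  host: reviews
  subsets:
    - name: v1
      labels:
        version: v1          # matches pods labeled version=v1
    - name: v2
      labels:
        version: v2          # matches pods labeled version=v2
EOF
```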
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md index 070dc3a557b6..7203d827091a 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -2,6 +2,10 @@ title: 6. Generate and View Traffic --- + + + + This section describes how to view the traffic that is being managed by Istio. ## The Kiali Traffic Graph diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index bcafaaca525a..38548a4921d5 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -2,6 +2,10 @@ title: 4. Set up the Istio Gateway --- + + + + The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 38e7ff642a30..150289745d7d 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -2,6 +2,10 @@ title: 5. Set up Istio's Components for Traffic Management --- + + + + A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index e885762ba9ed..af5b22672c59 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -2,6 +2,10 @@ title: 3. 
Add Deployments and Services with the Istio Sidecar --- + + + + :::note Prerequisite: To enable Istio for a workload, the cluster and namespace must have the Istio app installed. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index 7b4a05166e11..09624abc1bc0 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -2,6 +2,10 @@ title: 1. Enable Istio in the Cluster --- + + + + :::note Prerequisites: - Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index 2ced1c1591b0..13100f501093 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -2,6 +2,10 @@ title: 2. Enable Istio in a Namespace --- + + + + You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md index 070dc3a557b6..7203d827091a 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -2,6 +2,10 @@ title: 6. Generate and View Traffic --- + + + + This section describes how to view the traffic that is being managed by Istio. ## The Kiali Traffic Graph diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index bcafaaca525a..38548a4921d5 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -2,6 +2,10 @@ title: 4. Set up the Istio Gateway --- + + + + The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. You can use the Nginx Ingress controller with or without Istio installed. 
If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 38e7ff642a30..150289745d7d 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -2,6 +2,10 @@ title: 5. Set up Istio's Components for Traffic Management --- + + + + A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index e885762ba9ed..af5b22672c59 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -2,6 +2,10 @@ title: 3. Add Deployments and Services with the Istio Sidecar --- + + + + :::note Prerequisite: To enable Istio for a workload, the cluster and namespace must have the Istio app installed. 
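To populate the Kiali traffic graph mentioned in the generate-and-view-traffic pages above, any steady stream of requests through the gateway works. A minimal sketch, assuming the default `istio-ingressgateway` service in `istio-system` is exposed through a load balancer:

```bash
# Look up the external address of the Istio ingress gateway (assumes a
# LoadBalancer service; use the node port instead on bare-metal clusters).
GATEWAY_IP=$(kubectl -n istio-system get service istio-ingressgateway \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

# Send a request per second for a few minutes so traffic shows up in Kiali.
for i in $(seq 1 300); do
  curl -s -o /dev/null "http://${GATEWAY_IP}/"
  sleep 1
done
```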
From bea15afa17f5af289aa3bd84a0dbbd27771cc14e Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 25 Aug 2023 14:35:52 -0400 Subject: [PATCH 21/54] canonicized cis-scan-guides (#812) --- .../configure-alerts-for-periodic-scan-on-a-schedule.md | 4 ++++ .../create-a-custom-benchmark-version-to-run.md | 4 ++++ .../enable-alerting-for-rancher-cis-benchmark.md | 4 ++++ .../cis-scan-guides/install-rancher-cis-benchmark.md | 4 ++++ .../run-a-scan-periodically-on-a-schedule.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/run-a-scan.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/skip-tests.md | 4 ++++ .../cis-scan-guides/uninstall-rancher-cis-benchmark.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/view-reports.md | 4 ++++ .../configure-alerts-for-periodic-scan-on-a-schedule.md | 4 ++++ .../run-a-scan-periodically-on-a-schedule.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/run-a-scan.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/skip-tests.md | 4 ++++ .../configure-alerts-for-periodic-scan-on-a-schedule.md | 4 ++++ .../create-a-custom-benchmark-version-to-run.md | 6 +++++- .../enable-alerting-for-rancher-cis-benchmark.md | 4 ++++ .../cis-scan-guides/install-rancher-cis-benchmark.md | 4 ++++ .../run-a-scan-periodically-on-a-schedule.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/run-a-scan.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/skip-tests.md | 4 ++++ .../cis-scan-guides/uninstall-rancher-cis-benchmark.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/view-reports.md | 4 ++++ .../configure-alerts-for-periodic-scan-on-a-schedule.md | 4 ++++ .../create-a-custom-benchmark-version-to-run.md | 4 ++++ .../enable-alerting-for-rancher-cis-benchmark.md | 4 ++++ .../cis-scan-guides/install-rancher-cis-benchmark.md | 4 ++++ .../run-a-scan-periodically-on-a-schedule.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/run-a-scan.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/skip-tests.md | 4 ++++ .../cis-scan-guides/uninstall-rancher-cis-benchmark.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/view-reports.md | 4 ++++ .../configure-alerts-for-periodic-scan-on-a-schedule.md | 4 ++++ .../create-a-custom-benchmark-version-to-run.md | 4 ++++ .../enable-alerting-for-rancher-cis-benchmark.md | 4 ++++ .../cis-scan-guides/install-rancher-cis-benchmark.md | 4 ++++ .../run-a-scan-periodically-on-a-schedule.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/run-a-scan.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/skip-tests.md | 4 ++++ .../cis-scan-guides/uninstall-rancher-cis-benchmark.md | 4 ++++ .../advanced-user-guides/cis-scan-guides/view-reports.md | 4 ++++ 40 files changed, 161 insertions(+), 1 deletion(-) diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md index 43848173c89d..204f95c05bd6 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -2,6 +2,10 @@ title: Configure Alerts for Periodic Scan on a Schedule --- + + + + It is possible to run a ClusterScan on a schedule. A scheduled scan can also specify if you should receive alerts when the scan completes. 
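A hedged sketch of a scheduled `ClusterScan` with alerting, based on the `cis.cattle.io` custom resources installed by the rancher-cis-benchmark chart. The profile name and field names are assumptions, not values taken from these pages; check them against the CRDs actually installed in your cluster.

```bash
kubectl apply -f - <<'EOF'
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: nightly-scan                         # placeholder name
spec:
  scanProfileName: rke-profile-permissive    # use a ClusterScanProfile that exists in your cluster
  scheduledScanConfig:
    cronSchedule: "0 0 * * *"                # run every night at midnight
    retentionCount: 3                        # keep the three most recent reports
    scanAlertRule:
      alertOnComplete: true                  # alert when a scheduled scan finishes
      alertOnFailure: true                   # alert when a scheduled scan reports failures
EOF
```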
diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md index a0f1d181ec80..8d3b66c7e4e6 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -2,6 +2,10 @@ title: Create a Custom Benchmark Version for Running a Cluster Scan --- + + + + There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md index 0a22816035d5..ef2b5ae330d9 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Enable Alerting for Rancher CIS Benchmark --- + + + + Alerts can be configured to be sent out for a scan that runs on a schedule. :::note Prerequisite: diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 00adb621ed51..cd260325f206 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Install Rancher CIS Benchmark --- + + + + 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to install CIS Benchmark and click **Explore**. 1. In the left navigation bar, click **Apps > Charts**. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md index ddfdd4402e66..076fbdf409b7 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -2,6 +2,10 @@ title: Run a Scan Periodically on a Schedule --- + + + + To run a ClusterScan on a schedule, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md index 86e1c7256e3d..2fede69bee6a 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -2,6 +2,10 @@ title: Run a Scan --- + + + + When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. 
:::note diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md index 2da3f4cf4837..7492bc03f0b8 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -2,6 +2,10 @@ title: Skip Tests --- + + + + CIS scans can be run using test profiles with user-defined skips. To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md index 35da50a5a6e2..df23f7abbdc7 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Uninstall Rancher CIS Benchmark --- + + + + 1. From the **Cluster Dashboard,** go to the left navigation bar and click **Apps > Installed Apps**. 1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. 1. Click **Delete** and confirm **Delete**. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md index 0ffc0c38811c..57dc1183deed 100644 --- a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -2,6 +2,10 @@ title: View Reports --- + + + + To view the generated CIS scan reports, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md index e25e87aa986e..8583edc79d3e 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -2,6 +2,10 @@ title: Configure Alerts for Periodic Scan on a Schedule --- + + + + Rancher provides a set of alerts for cluster scans. which are not configured to have notifiers by default: - A manual cluster scan was completed diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md index 25f9c3443861..d44c9d764b89 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -2,6 +2,10 @@ title: Run a Scan Periodically on a Schedule --- + + + + Recurring scans can be scheduled to run on any RKE Kubernetes cluster. 
To enable recurring scans, edit the advanced options in the cluster configuration during cluster creation or after the cluster has been created. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md index ad39adae73f5..a54ab122d917 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -2,6 +2,10 @@ title: Run a Scan --- + + + + ## Run a Scan 1. From the cluster view in Rancher, click **Tools > CIS Scans.** diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md index 78961a12c691..f59ebac5bfe6 100644 --- a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -2,6 +2,10 @@ title: Skip Tests --- + + + + You can define a set of tests that will be skipped by the CIS scan when the next report is generated. These tests will be skipped for subsequent CIS scans, including both manually triggered and scheduled scans, and the tests will be skipped with any profile. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md index fe0e742420dd..9f66864ce161 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -2,6 +2,10 @@ title: Configure Alerts for Periodic Scan on a Schedule --- + + + + It is possible to run a ClusterScan on a schedule. A scheduled scan can also specify if you should receive alerts when the scan completes. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md index 2397d4b1c5de..1a1ae2656e60 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -1,6 +1,10 @@ --- title: Create a Custom Benchmark Version for Running a Cluster Scan ---- +--- + + + + There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. 
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md index ea88aa54b77b..76a35929836c 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Enable Alerting for Rancher CIS Benchmark --- + + + + Alerts can be configured to be sent out for a scan that runs on a schedule. :::note Prerequisite: diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 76be21bf8a89..19507e105cc4 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Install Rancher CIS Benchmark --- + + + + 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to install CIS Benchmark and click **Explore**. 1. In the left navigation bar, click **Apps & Marketplace > Charts**. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md index ddfdd4402e66..076fbdf409b7 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -2,6 +2,10 @@ title: Run a Scan Periodically on a Schedule --- + + + + To run a ClusterScan on a schedule, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md index 86e1c7256e3d..2fede69bee6a 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -2,6 +2,10 @@ title: Run a Scan --- + + + + When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. :::note diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md index ffdb302f9129..20129184b966 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -2,6 +2,10 @@ title: Skip Tests --- + + + + CIS scans can be run using test profiles with user-defined skips. To skip tests, you will create a custom CIS scan profile. 
A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md index 3a53bf20ff90..cb55249913a7 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Uninstall Rancher CIS Benchmark --- + + + + 1. From the **Cluster Dashboard,** go to the left navigation bar and click **Apps & Marketplace > Installed Apps**. 1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. 1. Click **Delete** and confirm **Delete**. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md index 0ffc0c38811c..57dc1183deed 100644 --- a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -2,6 +2,10 @@ title: View Reports --- + + + + To view the generated CIS scan reports, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md index 43848173c89d..204f95c05bd6 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -2,6 +2,10 @@ title: Configure Alerts for Periodic Scan on a Schedule --- + + + + It is possible to run a ClusterScan on a schedule. A scheduled scan can also specify if you should receive alerts when the scan completes. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md index a0f1d181ec80..8d3b66c7e4e6 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -2,6 +2,10 @@ title: Create a Custom Benchmark Version for Running a Cluster Scan --- + + + + There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. 
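Before defining a custom benchmark version, it can help to inspect the benchmark versions and scan profiles the chart already ships and export one as a starting point. A rough sketch; the resource and benchmark names are assumptions based on the `cis.cattle.io` CRDs:

```bash
# List the benchmark versions and profiles installed by rancher-cis-benchmark.
kubectl get clusterscanbenchmarks.cis.cattle.io
kubectl get clusterscanprofiles.cis.cattle.io

# Export an existing benchmark as YAML to use as a template for a custom one
# ("rke-cis-1.6-permissive" is a placeholder; pick a name from the list above).
kubectl get clusterscanbenchmarks.cis.cattle.io rke-cis-1.6-permissive -o yaml > custom-benchmark.yaml
```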
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md index 0a22816035d5..ef2b5ae330d9 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Enable Alerting for Rancher CIS Benchmark --- + + + + Alerts can be configured to be sent out for a scan that runs on a schedule. :::note Prerequisite: diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 5246936482cc..568aed2b24c3 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Install Rancher CIS Benchmark --- + + + + diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md index ddfdd4402e66..076fbdf409b7 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -2,6 +2,10 @@ title: Run a Scan Periodically on a Schedule --- + + + + To run a ClusterScan on a schedule, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md index 86e1c7256e3d..2fede69bee6a 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -2,6 +2,10 @@ title: Run a Scan --- + + + + When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. :::note diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md index 2da3f4cf4837..7492bc03f0b8 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -2,6 +2,10 @@ title: Skip Tests --- + + + + CIS scans can be run using test profiles with user-defined skips. To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. 
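A hedged sketch of a custom scan profile with skipped tests, using the `ClusterScanProfile` resource from the rancher-cis-benchmark chart. The benchmark version and test IDs are placeholders; use values that exist in your cluster.

```bash
kubectl apply -f - <<'EOF'
apiVersion: cis.cattle.io/v1
kind: ClusterScanProfile
metadata:
  name: profile-with-skips                   # placeholder name
spec:
  benchmarkVersion: rke-cis-1.6-permissive   # a benchmark version installed in your cluster
  skipTests:
    - "1.1.20"                               # IDs of tests to skip (placeholders)
    - "1.1.21"
EOF
```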
diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md index f62692d67f33..0e0b2444a251 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Uninstall Rancher CIS Benchmark --- + + + + diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md index 0ffc0c38811c..57dc1183deed 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -2,6 +2,10 @@ title: View Reports --- + + + + To view the generated CIS scan reports, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md index 43848173c89d..204f95c05bd6 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -2,6 +2,10 @@ title: Configure Alerts for Periodic Scan on a Schedule --- + + + + It is possible to run a ClusterScan on a schedule. A scheduled scan can also specify if you should receive alerts when the scan completes. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md index a0f1d181ec80..8d3b66c7e4e6 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -2,6 +2,10 @@ title: Create a Custom Benchmark Version for Running a Cluster Scan --- + + + + There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. 
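A very rough sketch of what a custom benchmark version might look like. The `ClusterScanBenchmark` field names below are assumptions about the cis-operator and may not match your installed CRD exactly; inspect an existing benchmark (for example with `kubectl get clusterscanbenchmarks -o yaml`) before relying on them.

```bash
kubectl apply -f - <<'EOF'
apiVersion: cis.cattle.io/v1
kind: ClusterScanBenchmark
metadata:
  name: custom-benchmark                          # placeholder name
spec:
  clusterProvider: rke                            # assumption: limits the benchmark to RKE clusters
  minKubernetesVersion: "1.21.0"                  # assumption: minimum cluster version for this benchmark
  customBenchmarkConfigMapName: custom-benchmark  # assumed field: ConfigMap holding the custom test config
  customBenchmarkConfigMapNamespace: cis-operator-system
EOF
```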
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md index 0a22816035d5..ef2b5ae330d9 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Enable Alerting for Rancher CIS Benchmark --- + + + + Alerts can be configured to be sent out for a scan that runs on a schedule. :::note Prerequisite: diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 00adb621ed51..cd260325f206 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Install Rancher CIS Benchmark --- + + + + 1. In the upper left corner, click **☰ > Cluster Management**. 1. On the **Clusters** page, go to the cluster where you want to install CIS Benchmark and click **Explore**. 1. In the left navigation bar, click **Apps > Charts**. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md index ddfdd4402e66..076fbdf409b7 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -2,6 +2,10 @@ title: Run a Scan Periodically on a Schedule --- + + + + To run a ClusterScan on a schedule, 1. In the upper left corner, click **☰ > Cluster Management**. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md index 86e1c7256e3d..2fede69bee6a 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -2,6 +2,10 @@ title: Run a Scan --- + + + + When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. :::note diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md index 2da3f4cf4837..7492bc03f0b8 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -2,6 +2,10 @@ title: Skip Tests --- + + + + CIS scans can be run using test profiles with user-defined skips. To skip tests, you will create a custom CIS scan profile. 
A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md index 35da50a5a6e2..df23f7abbdc7 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -2,6 +2,10 @@ title: Uninstall Rancher CIS Benchmark --- + + + + 1. From the **Cluster Dashboard,** go to the left navigation bar and click **Apps > Installed Apps**. 1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. 1. Click **Delete** and confirm **Delete**. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md index 0ffc0c38811c..57dc1183deed 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -2,6 +2,10 @@ title: View Reports --- + + + + To view the generated CIS scan reports, 1. In the upper left corner, click **☰ > Cluster Management**. From 36f84327ea68a6f0884c6542370e6c7e475ffcfb Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 25 Aug 2023 15:57:47 -0400 Subject: [PATCH 22/54] canonicized advanced user guides (#813) --- .../configure-layer-7-nginx-load-balancer.md | 4 ++++ .../advanced-user-guides/enable-api-audit-log.md | 4 ++++ .../enable-experimental-features/continuous-delivery.md | 4 ++++ .../istio-traffic-management-features.md | 4 ++++ .../enable-experimental-features/rancher-on-arm64.md | 4 ++++ .../unsupported-storage-drivers.md | 4 ++++ .../advanced-user-guides/open-ports-with-firewalld.md | 4 ++++ .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++++ .../configure-layer-7-nginx-load-balancer.md | 4 ++++ .../advanced-user-guides/enable-api-audit-log.md | 4 ++++ .../enable-experimental-features/continuous-delivery.md | 4 ++++ .../istio-traffic-management-features.md | 4 ++++ .../enable-experimental-features/rancher-on-arm64.md | 4 ++++ .../unsupported-storage-drivers.md | 4 ++++ .../advanced-user-guides/open-ports-with-firewalld.md | 4 ++++ .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++++ .../configure-layer-7-nginx-load-balancer.md | 4 ++++ .../advanced-user-guides/enable-api-audit-log.md | 4 ++++ .../enable-experimental-features/continuous-delivery.md | 4 ++++ .../istio-traffic-management-features.md | 4 ++++ .../enable-experimental-features/rancher-on-arm64.md | 4 ++++ .../unsupported-storage-drivers.md | 4 ++++ .../advanced-user-guides/open-ports-with-firewalld.md | 4 ++++ .../advanced-user-guides/tune-etcd-for-large-installs.md | 4 ++++ 24 files changed, 96 insertions(+) diff --git a/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md index 84b14758b358..98ea1abfb3e1 100644 --- a/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md +++ 
b/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md @@ -2,6 +2,10 @@ title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer --- + + + + For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. diff --git a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md index bd0dc8ab5d65..0697f327280a 100644 --- a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md +++ b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md @@ -2,6 +2,10 @@ title: Enabling the API Audit Log to Record System Events --- + + + + You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. You can enable API Auditing during Rancher installation or upgrade. diff --git a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md index c6bb47180775..ccc1c4f4bffe 100644 --- a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md +++ b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md @@ -2,6 +2,10 @@ title: Continuous Delivery --- + + + + [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) comes preinstalled in Rancher can't be fully disabled. However, the Fleet feature for GitOps continuous delivery may be disabled using the `continuous-delivery` feature flag. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md index aba1c8c49b37..d34b03f676de 100644 --- a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md +++ b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md @@ -2,6 +2,10 @@ title: UI for Istio Virtual Services and Destination Rules --- + + + + This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. > **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. 
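The audit log page above notes that API auditing is enabled during a Rancher install or upgrade. For a Helm-based install, a hedged sketch would look roughly like the following; the `auditLog.*` values are assumptions based on the Rancher chart and should be verified with `helm show values`.

```bash
# Sketch only: the release name, namespace, and repository reflect a typical
# Rancher install and may differ in your environment.
helm upgrade rancher rancher-stable/rancher \
  --namespace cattle-system \
  --reuse-values \
  --set auditLog.level=1 \
  --set auditLog.maxAge=7
```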
diff --git a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md index b7bc98cab015..5c86ee60a662 100644 --- a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md +++ b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md @@ -2,6 +2,10 @@ title: "Running on ARM64 (Experimental)" --- + + + + :::caution Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. diff --git a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md index a5d5578df8d8..284c3f23bd91 100644 --- a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md +++ b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md @@ -2,6 +2,10 @@ title: Allow Unsupported Storage Drivers --- + + + + This feature allows you to use types for storage providers and provisioners that are not enabled by default. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md index bda07e5ecc6e..90ba4bd4e8ce 100644 --- a/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md +++ b/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md @@ -2,6 +2,10 @@ title: Opening Ports with firewalld --- + + + + > We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. diff --git a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 28ba391c4030..e024f1dd7795 100644 --- a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -2,6 +2,10 @@ title: Tuning etcd for Large Installations --- + + + + When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. 
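A rough sketch of the tuning and recovery involved follows; the quota value and recovery commands are illustrative assumptions rather than part of this guide, and how you pass the flag depends on how your etcd servers are managed.

```bash
# Illustrative only: raise the etcd keyspace quota to ~6 GB by adding this flag
# to each etcd server's command line or configuration, then restarting etcd:
#   --quota-backend-bytes=6442450944

# If the keyspace has already filled up, compact old revisions, defragment,
# and clear the NOSPACE alarm so writes are accepted again.
rev=$(etcdctl endpoint status --write-out=json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]+')
etcdctl compact "$rev"
etcdctl defrag
etcdctl alarm disarm
```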
If you see `mvcc: database space exceeded` errors in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md index 0c5043ea9309..ce785ccee1c5 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md @@ -2,6 +2,10 @@ title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer --- + + + + For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work in conjunction with it. A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-api-audit-log.md index bd0dc8ab5d65..0697f327280a 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-api-audit-log.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-api-audit-log.md @@ -2,6 +2,10 @@ title: Enabling the API Audit Log to Record System Events --- + + + + You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. You can enable API Auditing during Rancher installation or upgrade. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md index b9c7a1383ee3..c954513cbc6e 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md @@ -2,6 +2,10 @@ title: Continuous Delivery --- + + + + As of Rancher v2.5, [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) comes preinstalled in Rancher, and as of Rancher v2.6, Fleet can no longer be fully disabled. However, the Fleet feature for GitOps continuous delivery may be disabled using the `continuous-delivery` feature flag.
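For example, assuming a Helm-based Rancher install and the chart's `extraEnv` value together with the `CATTLE_FEATURES` environment variable (an assumption about your setup; use whichever method the feature-flag page describes for your install), the flag could be turned off at upgrade time:

```bash
# Sketch: disable the GitOps continuous-delivery feature flag during a Helm upgrade.
# The release name, namespace, and hostname below are placeholders.
helm upgrade rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set 'extraEnv[0].name=CATTLE_FEATURES' \
  --set 'extraEnv[0].value=continuous-delivery=false'
```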
To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md index aba1c8c49b37..d34b03f676de 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md @@ -2,6 +2,10 @@ title: UI for Istio Virtual Services and Destination Rules --- + + + + This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. > **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md index b7bc98cab015..5c86ee60a662 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md @@ -2,6 +2,10 @@ title: "Running on ARM64 (Experimental)" --- + + + + :::caution Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md index a5d5578df8d8..284c3f23bd91 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md @@ -2,6 +2,10 @@ title: Allow Unsupported Storage Drivers --- + + + + This feature allows you to use types for storage providers and provisioners that are not enabled by default. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md index bda07e5ecc6e..90ba4bd4e8ce 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md @@ -2,6 +2,10 @@ title: Opening Ports with firewalld --- + + + + > We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. 
Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. diff --git a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 28ba391c4030..e024f1dd7795 100644 --- a/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -2,6 +2,10 @@ title: Tuning etcd for Large Installations --- + + + + When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md index 84b14758b358..98ea1abfb3e1 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md @@ -2,6 +2,10 @@ title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer --- + + + + For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log.md index bd0dc8ab5d65..0697f327280a 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log.md @@ -2,6 +2,10 @@ title: Enabling the API Audit Log to Record System Events --- + + + + You can enable the API audit log to record the sequence of system events initiated by individual users. 
You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. You can enable API Auditing during Rancher installation or upgrade. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md index c6bb47180775..ccc1c4f4bffe 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md @@ -2,6 +2,10 @@ title: Continuous Delivery --- + + + + [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) comes preinstalled in Rancher can't be fully disabled. However, the Fleet feature for GitOps continuous delivery may be disabled using the `continuous-delivery` feature flag. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md index aba1c8c49b37..d34b03f676de 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md @@ -2,6 +2,10 @@ title: UI for Istio Virtual Services and Destination Rules --- + + + + This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. > **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md index b7bc98cab015..5c86ee60a662 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md @@ -2,6 +2,10 @@ title: "Running on ARM64 (Experimental)" --- + + + + :::caution Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. 
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md index a5d5578df8d8..284c3f23bd91 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md @@ -2,6 +2,10 @@ title: Allow Unsupported Storage Drivers --- + + + + This feature allows you to use types for storage providers and provisioners that are not enabled by default. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../pages-for-subheaders/enable-experimental-features.md) diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md index bda07e5ecc6e..90ba4bd4e8ce 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md @@ -2,6 +2,10 @@ title: Opening Ports with firewalld --- + + + + > We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md index 28ba391c4030..e024f1dd7795 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md @@ -2,6 +2,10 @@ title: Tuning etcd for Large Installations --- + + + + When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. From 162d410af1cbef707b9382dcb687331d6a0cdc6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Lindh=C3=A9?= Date: Fri, 25 Aug 2023 22:47:45 +0200 Subject: [PATCH 23/54] Fix typo: vSphere port is required. (#802) * Fix typo: vSphere port is required. 
* Update versioned_docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md --------- Co-authored-by: Marty Hernandez Avedon --- .../node-template-configuration/vsphere.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md index 616b0d419e1c..226532a20b04 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md @@ -13,7 +13,7 @@ Your cloud credential has these fields: | Credential Field | Description | |-----------------|--------------| | vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | +| Port | Configure the port of the vCenter or ESXi server. | | Username and password | Enter your vSphere login username and password. | ## Scheduling @@ -92,4 +92,4 @@ In the **Engine Options** section of the node template, you can configure the co If you're provisioning Red Hat Enterprise Linux (RHEL) or CentOS nodes, leave the **Docker Install URL** field as the default value, or select **none**. This will bypass a check for Docker installation, as Docker is already installed on these node types. If you set **Docker Install URL** to a value other than the default or **none**, you might see an error message such as the following: `Error creating machine: RHEL ssh command error: command: sudo -E yum install -y curl err: exit status 1 output: Updating Subscription Management repositories.` -::: \ No newline at end of file +::: From b36876110e056be68c37b5c6d834f1465195d90e Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 4 Sep 2023 10:15:24 -0400 Subject: [PATCH 24/54] Update docs/pages-for-subheaders/k3s-hardening-guide.md Co-authored-by: Guilherme Macedo --- docs/pages-for-subheaders/k3s-hardening-guide.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index 17d79026580d..0d46e5112362 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -12,7 +12,9 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note - In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.15`, `1.1.17` and `4.1.15`. 
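If one of these checks is flagged, the remediation is typically a file-permission change on the affected node; a hypothetical sketch is shown below (the exact path comes from the failing check's output, not from this guide).

```bash
# Hypothetical file path; substitute the file named in the failing check id.
FILE=/var/lib/rancher/k3s/agent/kubelet.kubeconfig
stat -c '%a %U:%G %n' "$FILE"   # inspect the current mode and ownership
chmod 600 "$FILE"               # restrict to owner read/write, as the benchmark expects
```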
From 4ca8cfada38f38908ba6e186fb874b5d8fbb275d Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 4 Sep 2023 10:27:34 -0400 Subject: [PATCH 25/54] Update docs/pages-for-subheaders/rke1-hardening-guide.md Co-authored-by: Guilherme Macedo --- docs/pages-for-subheaders/rke1-hardening-guide.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/pages-for-subheaders/rke1-hardening-guide.md b/docs/pages-for-subheaders/rke1-hardening-guide.md index d842af828594..783d4c85a3e5 100644 --- a/docs/pages-for-subheaders/rke1-hardening-guide.md +++ b/docs/pages-for-subheaders/rke1-hardening-guide.md @@ -12,7 +12,9 @@ This hardening guide is intended to be used for RKE clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.24 up to v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note - Since Benchmark v1.24, check id `4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)` might fail, as /etc/kubernetes/ssl/kube-ca.pem is provisioned in 644 by default. From 5274e4e00068e7b6e26767e8b19a9ae82c64d1aa Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 6 Sep 2023 13:33:36 -0400 Subject: [PATCH 26/54] #726 - Getting Started UI instructions added to docs (#739) * added ui instructions for cluster list to access-clusters * re-org access-clusters so that 'Cluster List' becomes a searchable heading * rewording * added instructions and links for Global Settings * added global config + retitle/rephrase retitle to disambiguate global settings from global config listed some relevant global config items that need more info * correction: updated access-clusters page in /docs, not /versioned_docs * added Nodes UI instructions * download kubeconfig UI instructions added/modified one file contained correct instructions but they were somewhat convoluted, as you can download the file from a page mentioned earlier in the suggested instructions and the button thery describe is an icon, not labeled with any text * restoring formatting * updated pathway for image list downloads * finding rancher-images.txt * added instructions for Apps Marketplace * link pointing to v1 monitoring * link pointing to v1 dashboards * added links for v1 notifiers, logging tools * global dns entries, global dns providers, catalog links added * Rancher portal > Rancher UI * updated access-clusters page * rm note & add third way to access cluster dashboard * heading levels * addressed comments in global-configuration * addressed comments for nodes-and-node-pools * addressed comment on authorized-cluster-endpoint * rm'd create apps steps * updated kubeconfig download instructions * adjusting headers * updated images list instructions as version # is now an About link * restored intro to monitoring-and-alerting * KubeConfig > kubeconfig (except for naming the UI label) * left navigation sidebar > left navigation * Update docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md Co-authored-by: Billy Tat * applied changes to v2.7 docs --------- Co-authored-by: Billy Tat --- ...de-kubernetes-without-upgrading-rancher.md | 5 ++- .../helm-charts-in-rancher/create-apps.md | 4 -- .../authorized-cluster-endpoint.md | 13 ++++-- 
.../use-kubectl-and-kubeconfig.md | 7 ++-- .../manage-clusters/nodes-and-node-pools.md | 9 +++- .../built-in-dashboards.md | 6 ++- docs/pages-for-subheaders/access-clusters.md | 42 ++++++++++++++++--- ...on-permissions-and-global-configuration.md | 34 +++++++++++++-- .../monitoring-and-alerting.md | 9 ++-- .../pages-for-subheaders/access-clusters.md | 1 - ...de-kubernetes-without-upgrading-rancher.md | 5 ++- .../helm-charts-in-rancher/create-apps.md | 4 -- .../authorized-cluster-endpoint.md | 13 ++++-- .../use-kubectl-and-kubeconfig.md | 7 ++-- .../manage-clusters/nodes-and-node-pools.md | 9 +++- .../built-in-dashboards.md | 6 ++- .../pages-for-subheaders/access-clusters.md | 40 +++++++++++++++--- ...on-permissions-and-global-configuration.md | 34 +++++++++++++-- .../monitoring-and-alerting.md | 9 ++-- 19 files changed, 207 insertions(+), 50 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index 0bcba1a22a74..42c0b6348a2a 100644 --- a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -84,7 +84,10 @@ To sync Rancher with a local mirror of the RKE metadata, an administrator would After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private container image registry for them. -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +To download the system images for the private registry: + +1. Click **☰** in the top left corner. +1. Click **About** at the bottom of the left navigation. 1. Download the OS specific image lists for Linux or Windows. 1. Download `rancher-images.txt`. 1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. diff --git a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md index b74d1d7d4e92..bc5cc83d471d 100644 --- a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md +++ b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md @@ -6,16 +6,12 @@ title: Creating Apps -Rancher's App Marketplace is based on Helm Repositories and Helm Charts. You can add HTTP based standard Helm Repositories as well as any Git Repository which contains charts. - :::tip For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. ::: - - ## Chart Types Rancher supports two different types of charts: Helm charts and Rancher charts. 
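As a rough illustration of the difference between the two, a Rancher chart is conventionally a standard Helm chart plus two extra files that drive the Rancher UI; the chart name below is a placeholder.

```bash
# Sketch: scaffold a plain Helm chart, then add the two Rancher-specific files.
helm create my-chart             # standard Helm chart: Chart.yaml, values.yaml, templates/
touch my-chart/app-readme.md     # Rancher addition: short description shown on the app's page
touch my-chart/questions.yml     # Rancher addition: form definition that drives values.yaml in the UI
```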
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md index 980c0cfa978f..543ed4d10b40 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -8,11 +8,18 @@ title: How the Authorized Cluster Endpoint Works This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) -### About the kubeconfig File +### About the Kubeconfig File -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). +The kubeconfig file is used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. +The kubeconfig file and its contents are specific to each cluster. It can be downloaded from the **Clusters** page in Rancher: + +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose kubeconfig you want to download, and select **⁝** at the end of the row. +1. Select **Download KubeConfig** from the submenu. + +You will need a separate kubeconfig file for each cluster that you have access to in Rancher. After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md index de70bb8e3421..6099a7d33d34 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -31,9 +31,10 @@ These instructions assume that you have already created a Kubernetes cluster, an ::: -1. Log into Rancher. Click **☰ > Cluster Management**. -1. Go to the cluster that you want to access with kubectl and click **Explore**. -1. In the top navigation bar, click **Download KubeConfig** button. +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose kubeconfig you want to download, and select **⁝** at the end of the row. +1. Select **Download KubeConfig** from the submenu. 1. Save the YAML file on your local computer. Move the file to `~/.kube/config`. 
Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: ``` kubectl --kubeconfig /custom/path/kube.config get pods diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md index e76f3694228b..2e7f151e5c22 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md @@ -6,7 +6,14 @@ title: Nodes and Node Pools -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. +After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. + +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose nodes you want to manage, and click the **Explore** button at the end of the row. +1. Select **Nodes** from the left navigation. + +Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. :::note diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index d5fd42aa05e0..83bf8987e16b 100644 --- a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -110,4 +110,8 @@ You can also see the rules in the Prometheus UI: ![PrometheusRules UI](/img/prometheus-rules-ui.png) -For more information on configuring PrometheusRules in Rancher, see [this page.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) \ No newline at end of file +For more information on configuring PrometheusRules in Rancher, see [this page.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) + +## Legacy UI + +For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). diff --git a/docs/pages-for-subheaders/access-clusters.md b/docs/pages-for-subheaders/access-clusters.md index 04edb86702c0..43ea26c3cab3 100644 --- a/docs/pages-for-subheaders/access-clusters.md +++ b/docs/pages-for-subheaders/access-clusters.md @@ -14,22 +14,52 @@ For more information on roles-based access control, see [this section.](manage-r For information on how to set up an authentication system, see [this section.](authentication-config.md) +## Clusters in Rancher UI -### Rancher UI +There are several paths to view and manage clusters through the Rancher UI. -Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. 
+### Clusters Page -### kubectl +You can access the **Clusters** page from the **☰** menu: + +1. Click **☰**. +1. Select **Cluster Management**. + +You can also access the **Clusters** page by clicking the **Manage** button above the clusters table on the Rancher UI **Home** page. + +On the **Clusters** page, select **⁝** at the end of each row to view a submenu with the following options: + +* [Kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) +* Download KubeConfig +* Copy KubeConfig to Clipboard +* Edit Config +* View YAML +* Download YAML + +### Cluster Dashboard + +On the **Clusters** page, select the **Explore** button at the end of each row to view that cluster's **Cluster Dashboard**. You can also view the dashboard by clicking the name of a cluster in the table, then clicking the **Explore** buttton on the **Cluster** page. + +The **Cluster Dashboard** is also accessible from the Rancher UI **Home** page, by clicking on the name of a cluster. + +You can also access the **Cluster Dashboard** from the **☰** in the top navigation bar: + +1. Click **☰**. +1. Select the name of a cluster from the **Explore Cluster** menu option. + +The **Cluster Dashboard** lists information about a specific cluster, such as number of nodes, memory usage, events, and resources. + +## kubectl You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). - **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). -### Rancher CLI +## Rancher CLI You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. -### Rancher API +## Rancher API -Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file +Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. 
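As a quick sketch of that workflow (the server URL and Bearer token below are placeholders; the CLI authenticates with a Rancher API key):

```bash
# Sketch: authenticate the Rancher CLI with an API key, then target a cluster.
rancher login https://rancher.example.com --token token-abcde:xxxxxxxxxxxxxxxxxxxx
rancher context switch        # choose the cluster/project to operate on
rancher kubectl get nodes     # pass a kubectl command through the CLI
```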
diff --git a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md index 2df94806af94..98494a94cf90 100644 --- a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -1,5 +1,5 @@ --- -title: Authentication, Permissions and Global Configuration +title: Authentication, Permissions and Global Settings --- @@ -52,6 +52,34 @@ Rancher Kubernetes Metadata contains Kubernetes version information which Ranche For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md). -## Enabling Experimental Features +## Global Settings -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.](enable-experimental-features.md) +Options that control certain global-level Rancher settings are available from the top navigation bar. + +Click **☰** in the top left corner, then select **Global Settings**, to view and configure the following settings: + +- **Settings**: Various Rancher defaults, such as the minimum length for a user's password (`password-min-length`). You should be cautious when modifying these settings, as invalid values may break your Rancher installation. +- **Feature Flags**: Rancher features that can be toggled on or off. Some of these flags are for [experimental features](#enabling-experimental-features). +- **Banners**: Elements you can add to fixed locations on the portal. For example, you can use these options to [set a custom banner](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md#fixed-banners) for users when they login to Rancher. +- **Branding**: Rancher UI design elements that you can [customize](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md). You can add a custom logo or favicon, and modify UI colors. +- **Performance**: Performance settings for the Rancher UI, such as incremental resource loading. +- **Home Links**: Links displayed on the Rancher UI **Home** page. You can modify visibility for the default links or add your own links. + +### Enabling Experimental Features + +Rancher includes some features that are experimental and/or disabled by default. Feature flags allow you to enable these features. For more information, refer to the section about [feature flags.](enable-experimental-features.md) + +### Global Configuration + +**Global Configuration** options aren't visible unless you activate the **legacy** [feature flag](enable-experimental-features.md). The **legacy** flag is disabled by default on fresh Rancher installs of v2.6 and later. If you upgrade from an earlier Rancher version, or activate the **legacy** feature flag on Rancher v2.6 and later, **Global Configuration** is available from the top navigation menu: + +1. Click **☰** in the top left corner. +1. Select **Global Configuration** from the **Legacy Apps**. 
+ +The following features are available under **Global Configuration**: + +- **Catalogs** +- **Global DNS Entries** +- **Global DNS Providers** + +As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. \ No newline at end of file diff --git a/docs/pages-for-subheaders/monitoring-and-alerting.md b/docs/pages-for-subheaders/monitoring-and-alerting.md index 773796fc0959..d9d00e1aa4eb 100644 --- a/docs/pages-for-subheaders/monitoring-and-alerting.md +++ b/docs/pages-for-subheaders/monitoring-and-alerting.md @@ -7,8 +7,13 @@ description: Prometheus lets you view metrics from your different Rancher and Ku -Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. +The `rancher-monitoring` application can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. + +Introduced in Rancher v2.5, the application is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) +For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../versioned_docs/v2.0-v2.4/explanations/integrations-in-rancher/notifiers) and other [tools](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/project-tools.md). + +Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. ### Features @@ -16,8 +21,6 @@ Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, or restore crashed servers. -The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) - The monitoring application: - Monitors the state and processes of your cluster nodes, Kubernetes components, and software deployments. 
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md index fca157a83205..d83e8c138b01 100644 --- a/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md @@ -14,7 +14,6 @@ For more information on roles-based access control, see [this section.](manage-r For information on how to set up an authentication system, see [this section.](about-authentication.md) - ### Rancher UI Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index 0bcba1a22a74..42c0b6348a2a 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -84,7 +84,10 @@ To sync Rancher with a local mirror of the RKE metadata, an administrator would After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private container image registry for them. -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +To download the system images for the private registry: + +1. Click **☰** in the top left corner. +1. Click **About** at the bottom of the left navigation. 1. Download the OS specific image lists for Linux or Windows. 1. Download `rancher-images.txt`. 1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md index b74d1d7d4e92..bc5cc83d471d 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md @@ -6,16 +6,12 @@ title: Creating Apps -Rancher's App Marketplace is based on Helm Repositories and Helm Charts. You can add HTTP based standard Helm Repositories as well as any Git Repository which contains charts. - :::tip For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. ::: - - ## Chart Types Rancher supports two different types of charts: Helm charts and Rancher charts. 
diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md index 980c0cfa978f..543ed4d10b40 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -8,11 +8,18 @@ title: How the Authorized Cluster Endpoint Works This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) -### About the kubeconfig File +### About the Kubeconfig File -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). +The kubeconfig file is used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. +The kubeconfig file and its contents are specific to each cluster. It can be downloaded from the **Clusters** page in Rancher: + +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose kubeconfig you want to download, and select **⁝** at the end of the row. +1. Select **Download KubeConfig** from the submenu. + +You will need a separate kubeconfig file for each cluster that you have access to in Rancher. After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md index de70bb8e3421..6099a7d33d34 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -31,9 +31,10 @@ These instructions assume that you have already created a Kubernetes cluster, an ::: -1. Log into Rancher. Click **☰ > Cluster Management**. -1. Go to the cluster that you want to access with kubectl and click **Explore**. -1. In the top navigation bar, click **Download KubeConfig** button. +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose kubeconfig you want to download, and select **⁝** at the end of the row. +1. Select **Download KubeConfig** from the submenu. 1. Save the YAML file on your local computer. Move the file to `~/.kube/config`. 
Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: ``` kubectl --kubeconfig /custom/path/kube.config get pods diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md index e76f3694228b..2e7f151e5c22 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md @@ -6,7 +6,14 @@ title: Nodes and Node Pools -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. +After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. + +1. Click **☰** in the top left corner. +1. Select **Cluster Management**. +1. Find the cluster whose nodes you want to manage, and click the **Explore** button at the end of the row. +1. Select **Nodes** from the left navigation. + +Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. :::note diff --git a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index d5fd42aa05e0..3d1096a8bed3 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -110,4 +110,8 @@ You can also see the rules in the Prometheus UI: ![PrometheusRules UI](/img/prometheus-rules-ui.png) -For more information on configuring PrometheusRules in Rancher, see [this page.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) \ No newline at end of file +For more information on configuring PrometheusRules in Rancher, see [this page.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) + +## Legacy UI + +For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). 
\ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md index 04edb86702c0..b1c72ddc266a 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md @@ -14,22 +14,52 @@ For more information on roles-based access control, see [this section.](manage-r For information on how to set up an authentication system, see [this section.](authentication-config.md)
+On the **Clusters** page, select **⁝** at the end of each row to view a submenu with the following options:
-### Rancher UI
+* [Kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md)
+* Download KubeConfig
+* Copy KubeConfig to Clipboard
+* Edit Config
+* View YAML
+* Download YAML
-Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API.
+### Cluster Dashboard
-### kubectl
+On the **Clusters** page, select the **Explore** button at the end of each row to view that cluster's **Cluster Dashboard**. You can also view the dashboard by clicking the name of a cluster in the table, then clicking the **Explore** button on the **Cluster** page.
+
+The **Cluster Dashboard** is also accessible from the Rancher UI **Home** page, by clicking on the name of a cluster.
+
+You can also access the **Cluster Dashboard** from the **☰** in the top navigation bar:
+
+1. Click **☰**.
+1. Select the name of a cluster from the **Explore Cluster** menu option.
+
+The **Cluster Dashboard** lists information about a specific cluster, such as number of nodes, memory usage, events, and resources.
+
+## Clusters in Rancher UI
+
+There are several paths to view and manage clusters through the Rancher UI.
+
+### Clusters Page
+
+You can access the **Clusters** page from the **☰** menu:
+
+1. Click **☰**.
+1. Select **Cluster Management**.
+
+You can also access the **Clusters** page by clicking the **Manage** button above the clusters table on the Rancher UI **Home** page.
+
+## kubectl
You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl:
- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md).
- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md).
-### Rancher CLI
+## Rancher CLI
You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands.
-### Rancher API
+## Rancher API
Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md index 2df94806af94..98494a94cf90 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -1,5 +1,5 @@ --- -title: Authentication, Permissions and Global Configuration +title: Authentication, Permissions and Global Settings --- @@ -52,6 +52,34 @@ Rancher Kubernetes Metadata contains Kubernetes version information which Ranche For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md).
-## Enabling Experimental Features
+## Global Settings
-Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.](enable-experimental-features.md)
+Options that control certain global-level Rancher settings are available from the top navigation bar.
+
+Click **☰** in the top left corner, then select **Global Settings**, to view and configure the following settings:
+
+- **Settings**: Various Rancher defaults, such as the minimum length for a user's password (`password-min-length`). You should be cautious when modifying these settings, as invalid values may break your Rancher installation.
+- **Feature Flags**: Rancher features that can be toggled on or off. Some of these flags are for [experimental features](#enabling-experimental-features).
+- **Banners**: Elements you can add to fixed locations on the portal. For example, you can use these options to [set a custom banner](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md#fixed-banners) for users when they log in to Rancher.
+- **Branding**: Rancher UI design elements that you can [customize](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md). You can add a custom logo or favicon, and modify UI colors.
+- **Performance**: Performance settings for the Rancher UI, such as incremental resource loading.
+- **Home Links**: Links displayed on the Rancher UI **Home** page. You can modify visibility for the default links or add your own links.
+
+### Enabling Experimental Features
+
+Rancher includes some features that are experimental and/or disabled by default. Feature flags allow you to enable these features. For more information, refer to the section about [feature flags.](enable-experimental-features.md)
+
+### Global Configuration
+
+**Global Configuration** options aren't visible unless you activate the **legacy** [feature flag](enable-experimental-features.md). The **legacy** flag is disabled by default on fresh Rancher installs of v2.6 and later.
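Feature flags are also surfaced as `Feature` objects on the Rancher management (local) cluster, so the **legacy** flag can in principle be enabled by applying something like the sketch below. The resource shape is an assumption and may differ between Rancher versions; toggling the flag under **Global Settings > Feature Flags** is the documented route.

```yaml
# Sketch only: the "legacy" feature flag as a Feature object on the local cluster.
# Verify the resource shape against your Rancher version before applying.
apiVersion: management.cattle.io/v3
kind: Feature
metadata:
  name: legacy
spec:
  value: true
```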
If you upgrade from an earlier Rancher version, or activate the **legacy** feature flag on Rancher v2.6 and later, **Global Configuration** is available from the top navigation menu:
+
+1. Click **☰** in the top left corner.
+1. Select **Global Configuration** from the **Legacy Apps** section.
+
+The following features are available under **Global Configuration**:
+
+- **Catalogs**
+- **Global DNS Entries**
+- **Global DNS Providers**
+
+As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md index 773796fc0959..d9d00e1aa4eb 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md @@ -7,8 +7,13 @@ description: Prometheus lets you view metrics from your different Rancher and Ku
-Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster.
+The `rancher-monitoring` application can quickly deploy leading open-source monitoring and alerting solutions onto your cluster.
+
+Introduced in Rancher v2.5, the application is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter)
+For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers) and other [tools](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md).
+
+Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster.
### Features
@@ -16,8 +21,6 @@ Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using
By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, or restore crashed servers.
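As a shape-only illustration of the alerting side of this stack, the hypothetical PrometheusRule below shows the kind of rule the Prometheus Operator evaluates; it is not a rule shipped by `rancher-monitoring`, and the namespace assumes the default install location.

```yaml
# Hypothetical alerting rule, included only to show the PrometheusRule shape.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-node-cpu-alert
  namespace: cattle-monitoring-system   # assumed default namespace for the monitoring stack
spec:
  groups:
  - name: example.rules
    rules:
    - alert: HighNodeCPU
      expr: (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) > 0.9
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: Node CPU usage has stayed above 90% for 10 minutes.
```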
-The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) - The monitoring application: - Monitors the state and processes of your cluster nodes, Kubernetes components, and software deployments. From 02912f266c0d303d8e8e143c3114922f40c5bfd0 Mon Sep 17 00:00:00 2001 From: Tejeev Date: Wed, 6 Sep 2023 15:49:29 -0600 Subject: [PATCH 27/54] clarified location of project ID field (#823) * clarified location of project ID field * v2.7 page sync --------- Co-authored-by: martyav --- .../prometheus-federator-guides/enable-prometheus-federator.md | 2 +- .../prometheus-federator-guides/enable-prometheus-federator.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index b8decaf2e181..129cfde943ab 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -79,7 +79,7 @@ There are no specific recommendations on how much memory the Cluster Prometheus 1. Click the **Prometheus Federator** chart. 1. Click **Install**. 1. On the **Metadata** page, click **Next**. -1. In the **Project Release Namespace Project ID** field, the `System Project` is used as the default but can be overridden with another project with similarly [limited access](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). Project IDs can be found with the following command run in the local upstream cluster: +1. In the **Namespaces** > **Project Release Namespace Project ID** field, the `System Project` is used as the default but can be overridden with another project with similarly [limited access](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). 
Project IDs can be found with the following command run in the local upstream cluster: ```plain kubectl get projects -A -o custom-columns="NAMESPACE":.metadata.namespace,"ID":.metadata.name,"NAME":.spec.displayName diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index b8decaf2e181..129cfde943ab 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -79,7 +79,7 @@ There are no specific recommendations on how much memory the Cluster Prometheus 1. Click the **Prometheus Federator** chart. 1. Click **Install**. 1. On the **Metadata** page, click **Next**. -1. In the **Project Release Namespace Project ID** field, the `System Project` is used as the default but can be overridden with another project with similarly [limited access](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). Project IDs can be found with the following command run in the local upstream cluster: +1. In the **Namespaces** > **Project Release Namespace Project ID** field, the `System Project` is used as the default but can be overridden with another project with similarly [limited access](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). Project IDs can be found with the following command run in the local upstream cluster: ```plain kubectl get projects -A -o custom-columns="NAMESPACE":.metadata.namespace,"ID":.metadata.name,"NAME":.spec.displayName From ec43045b579f64f0d69207d3b7158ebbf53ea73e Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Thu, 7 Sep 2023 14:50:49 -0400 Subject: [PATCH 28/54] k3s doc: Update protect-kernel-defaults and remove failing checks note --- .../k3s-hardening-guide.md | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index 36596a4ae07a..6eeb92f201df 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -21,8 +21,7 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -- In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.15`, `1.1.17` and `4.1.15`. - - In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. +- In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. 
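For clusters still assessed against the older benchmark versions listed above, where check 4.2.6 applies, the flag can also be set directly in the K3s configuration file on a standalone install; a minimal sketch, assuming the standard K3s config path:

```yaml
# Hypothetical /etc/rancher/k3s/config.yaml snippet; only relevant for benchmark
# versions earlier than v1.7, where check 4.2.6 still expects protect-kernel-defaults.
protect-kernel-defaults: true
```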
@@ -38,6 +37,28 @@ The first section (1.1) of the CIS Benchmark primarily focuses on pod manifest ## Host-level Requirements +### Ensure `protect-kernel-defaults` is set + + + + +`protect-kernel-defaults` is no longer required since CIS benchmark 1.7. + + + + +This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. + +The `protect-kernel-defaults` flag can be set in the cluster configuration in Rancher. + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + protect-kernel-defaults: true +``` + ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: @@ -709,6 +730,7 @@ spec: - config: kubelet-arg: - make-iptables-util-chains=true # CIS 4.2.7 + protect-kernel-defaults: true # CIS 4.2.6 ``` From a9da000dfe4562124717d84f6dad5ae915a5328a Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Thu, 7 Sep 2023 15:34:58 -0400 Subject: [PATCH 29/54] Fix markdown --- docs/pages-for-subheaders/k3s-hardening-guide.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index 6eeb92f201df..ee564ada2b4c 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -21,7 +21,7 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -- In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. +- In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -42,7 +42,7 @@ The first section (1.1) of the CIS Benchmark primarily focuses on pod manifest -`protect-kernel-defaults` is no longer required since CIS benchmark 1.7. +The `protect-kernel-defaults` is no longer required since CIS benchmark 1.7. @@ -59,6 +59,9 @@ spec: protect-kernel-defaults: true ``` + + + ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. 
Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: From f308f802e16baf5785b63cbd9b2fcf57a8f3cda4 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Thu, 7 Sep 2023 17:00:00 -0400 Subject: [PATCH 30/54] #790 highlight alert about folders for s3 backup config (#821) * note added & revised wording on section about s3 backups * added warning to backup-configuration.md * Apply suggestions from code review Co-authored-by: Sunil Singh --------- Co-authored-by: Sunil Singh --- .../back-up-rancher-launched-kubernetes-clusters.md | 12 ++++++++++-- .../backup-configuration.md | 6 ++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md index e7a4999a53f9..a9046a1e64da 100644 --- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -260,11 +260,19 @@ By default, the `local` backup target is selected. The benefits of this option i ### S3 Backup Target -The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. Additionally, it is recommended to ensure that every cluster has a unique bucket and/or folder, as Rancher will populate snapshot information for any available snapshot that is listed in the S3 bucket/folder that is configured for the cluster. +We recommend that you use the `S3` backup target. It lets you store snapshots externally, on an S3 compatible backend. Since the snapshots aren't stored locally, you can still restore the cluster even if you lose all etcd nodes. + +Although the `S3` target offers advantages over local backup, it does require extra configuration. + +:::caution + +If you use an S3 backup target, make sure that every cluster has its own bucket or folder. Rancher populates snapshot information from any available snapshot listed in the S3 bucket or folder configured for that cluster. + +::: | Option | Description | Required| |---|---|---| -|S3 Bucket Name| S3 bucket name where backups will be stored| *| +|S3 Bucket Name| Name of S3 bucket to store backups| *| |S3 Region|S3 region for the backup bucket| | |S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | |S3 Access Key|S3 access key with permission to access the backup bucket|*| diff --git a/docs/reference-guides/backup-restore-configuration/backup-configuration.md b/docs/reference-guides/backup-restore-configuration/backup-configuration.md index dcf4d2bbd91b..37ae6f2ab457 100644 --- a/docs/reference-guides/backup-restore-configuration/backup-configuration.md +++ b/docs/reference-guides/backup-restore-configuration/backup-configuration.md @@ -68,6 +68,12 @@ Selecting the first option stores this backup in the storage location configured ### S3 +:::caution + +If you use an S3 backup target, make sure that every cluster has its own bucket or folder. 
Rancher populates snapshot information from any available snapshot listed in the S3 bucket or folder configured for that cluster. + +::: + The S3 storage location contains the following configuration fields: 1. **Credential Secret** (optional): If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) The Credential Secret dropdown lists the secrets in all namespaces. From 039fa9c1dbf9adf2ed7c3a36401134b012987cda Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Thu, 7 Sep 2023 21:03:12 -0400 Subject: [PATCH 31/54] Convert links to https://rancher.com/docs/rancher/v2.6 to use correct Markdown syntax (#830) --- .../install-upgrade-on-a-kubernetes-cluster/rollbacks.md | 2 +- .../enable-prometheus-federator.md | 2 +- .../set-up-monitoring-for-workloads.md | 2 +- .../backup-restore-and-disaster-recovery/restore-rancher.md | 2 +- .../rancher-managed-clusters/monitoring-best-practices.md | 2 +- .../gke-cluster-configuration/gke-private-clusters.md | 2 +- .../rke2-cluster-configuration.md | 2 +- .../rancher-security/security-advisories-and-cves.md | 6 +++--- .../install-upgrade-on-a-kubernetes-cluster/rollbacks.md | 2 +- .../enable-prometheus-federator.md | 2 +- .../set-up-monitoring-for-workloads.md | 2 +- .../backup-restore-and-disaster-recovery/restore-rancher.md | 2 +- .../rancher-managed-clusters/monitoring-best-practices.md | 2 +- .../gke-cluster-configuration/gke-private-clusters.md | 2 +- .../rke2-cluster-configuration.md | 2 +- .../rancher-security/security-advisories-and-cves.md | 6 +++--- 16 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md index f6e491dde2eb..bb7fed4fe0a6 100644 --- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -26,7 +26,7 @@ See the [rancher/rancher-cleanup repo](https://github.com/rancher/rancher-cleanu ### Rolling back from v2.6.4+ to lower versions of v2.6.x 1. Follow these [instructions](https://github.com/rancher/rancher-cleanup/blob/main/README.md) to run the scripts. -1. Follow these [instructions](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. +1. Follow these [instructions](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. 1. Omit Step 3. 1. When you reach Step 4, install the Rancher v2.6.x version on the local cluster you intend to roll back to. 
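The **Credential Secret** option described in the backup configuration change above expects an opaque Secret whose `accessKey` and `secretKey` entries hold the S3 credentials; a minimal sketch with placeholder names and values:

```yaml
# Hypothetical credential secret for the rancher-backup S3 target.
# Name and namespace are placeholders; key values must be base64-encoded.
apiVersion: v1
kind: Secret
metadata:
  name: s3-backup-creds
  namespace: default
type: Opaque
data:
  accessKey: <base64-encoded access key>
  secretKey: <base64-encoded secret key>
```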
diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index 129cfde943ab..b4f9fcc166ff 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -8,7 +8,7 @@ title: Enable Prometheus Federator ## Requirements -By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. +By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. For instructions on installing rancher-monitoring, refer to [this page](../enable-monitoring.md). diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index b9a655dd913f..34d45c06e5e6 100644 --- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -12,7 +12,7 @@ The steps for setting up monitoring for workloads depend on whether you want bas If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The resource usage time series data is in Prometheus's local time series database. -Grafana shows the data in aggregate, but you can see the data for the individual workload by using a PromQL query that extracts the data for that workload. Once you have the PromQL query, you can execute the query individually in the Prometheus UI and see the time series visualized there, or you can use the query to customize a Grafana dashboard to display the workload metrics. For examples of PromQL queries for workload metrics, see [this section.](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/expression/#workload-metrics) +Grafana shows the data in aggregate, but you can see the data for the individual workload by using a PromQL query that extracts the data for that workload. Once you have the PromQL query, you can execute the query individually in the Prometheus UI and see the time series visualized there, or you can use the query to customize a Grafana dashboard to display the workload metrics. 
For examples of PromQL queries for workload metrics, see [this section.](../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md#workload-metrics) To set up custom metrics for your workload, you will need to set up an exporter and create a new ServiceMonitor custom resource to configure Prometheus to scrape metrics from your exporter. diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md index aaa7b2cfea98..6e2179a5d5fe 100644 --- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md @@ -36,7 +36,7 @@ See the [rancher/rancher-cleanup repo](https://github.com/rancher/rancher-cleanu ### Rolling back from v2.6.4+ to lower versions of v2.6.x 1. Follow these [instructions](https://github.com/rancher/rancher-cleanup/blob/main/README.md) to run the scripts. -1. Follow these [instructions](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. +1. Follow these [instructions](../backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. 1. Omit Step 3. 1. When you reach Step 4, install the Rancher v2.6.x version on the local cluster you intend to roll back to. diff --git a/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md index 7f641e9d917c..993543cd07f4 100644 --- a/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md +++ b/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md @@ -86,7 +86,7 @@ Sometimes it is useful to monitor workloads from the outside. For this, you can If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic to understand how all these workloads are communicating with each other and where a problem or bottleneck may be. -Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click](https://rancher.com/docs/rancher/v2.6/en/istio/) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. +Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click](../../../pages-for-subheaders/istio.md) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. 
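The workload monitoring change above mentions creating a ServiceMonitor so Prometheus scrapes a custom exporter; the sketch below shows the general shape, using hypothetical names and labels that would need to match the Service in front of the exporter.

```yaml
# Hypothetical ServiceMonitor; the label selector and port name must match the exporter's Service.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app
  namespace: my-namespace
spec:
  selector:
    matchLabels:
      app: my-app
  endpoints:
  - port: metrics      # named port on the Service that serves /metrics
    interval: 30s
```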
## Real User Monitoring diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md index 7a11ccdd76bd..8413e1166a0c 100644 --- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md @@ -26,7 +26,7 @@ This scenario is not officially supported, but is described for cases in which u ::: -If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/air-gap/) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it. +If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it. ### Private Control Plane Endpoint diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md index 1fa87b8e54ff..c279eccf9786 100644 --- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md @@ -164,7 +164,7 @@ Additional Kubernetes manifests, managed as an [Add-on](https://kubernetes.io/do ### Agent Environment Vars -Option to set environment variables for [Rancher agents](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. +Option to set environment variables for [Rancher agents](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. 
### etcd diff --git a/docs/reference-guides/rancher-security/security-advisories-and-cves.md b/docs/reference-guides/rancher-security/security-advisories-and-cves.md index 558f80d7dfc4..872a908f6331 100644 --- a/docs/reference-guides/rancher-security/security-advisories-and-cves.md +++ b/docs/reference-guides/rancher-security/security-advisories-and-cves.md @@ -19,10 +19,10 @@ Rancher is committed to informing the community of security issues in our produc | [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where a flaw with authorization logic allows privilege escalation in downstream clusters through cluster role template binding (CRTB) and project role template binding (PRTB). The vulnerability can be exploited by any user who has permissions to create/edit CRTB or PRTB (such as `cluster-owner`, `manage cluster members`, `project-owner`, and `manage project members`) to gain owner permission in another project in the same cluster or in another project on a different downstream cluster. | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | It was discovered that in Rancher versions up to and including 2.5.12 and 2.6.3, there is a failure to properly sanitize credentials in cluster template answers. This failure can lead to plaintext storage and exposure of credentials, passwords, and API tokens. The exposed credentials are visible in Rancher to authenticated `Cluster Owners`, `Cluster Members`, `Project Owners`, and `Project Members` on the endpoints `/v1/management.cattle.io.clusters`, `/v3/clusters`, and `/k8s/clusters/local/apis/management.cattle.io/v3/clusters`. | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where sensitive fields like passwords, API keys, and Rancher's service account token (used to provision clusters) were stored in plaintext directly on Kubernetes objects like `Clusters` (e.g., `cluster.management.cattle.io`). Anyone with read access to those objects in the Kubernetes API could retrieve the plaintext version of those sensitive data. The issue was partially found and reported by Florian Struck (from [Continum AG](https://www.continum.net/)) and [Marco Stuurman](https://github.com/fe-ax) (from [Shock Media B.V.](https://www.shockmedia.nl/)). | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | -| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) Container Network Interface (CNI) when configured through [RKE templates](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/). 
A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | -| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](../../faq/container-network-interface-providers.md#weave) Container Network Interface (CNI) when configured through [RKE templates](../../pages-for-subheaders/about-rke1-templates.md). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | +| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. 
In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). 
This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.11 and from 2.6.0 up to and including 2.6.2, where an insufficient check of the same-origin policy when downloading Helm charts from a configured private repository can lead to exposure of the repository credentials to a third-party provider. This issue only happens when the user configures access credentials to a private repository in Rancher inside `Apps & Marketplace > Repositories`. The issue was found and reported by Martin Andreas Ullrich. | 14 Apr 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) and [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | | [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | A vulnerability was discovered in versions of Rancher starting 2.0 up to and including 2.6.3. The `restricted` pod security policy (PSP) provided in Rancher deviated from the upstream `restricted` policy provided in Kubernetes on account of which Rancher's PSP had `runAsUser` set to `runAsAny`, while upstream had `runAsUser` set to `MustRunAsNonRoot`. This allowed containers to run as any user, including a privileged user (`root`), even when Rancher's `restricted` policy was enforced on a project or at the cluster level. | 31 Mar 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | | [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | A vulnerability was discovered in Rancher versions up to and including 2.4.17, 2.5.11 and 2.6.2. After removing a `Project Role` associated with a group from the project, the bindings that granted access to cluster-scoped resources for those subjects were not deleted. This was due to an incomplete authorization logic check. A user who was a member of the affected group with authenticated access to Rancher could exploit this vulnerability to access resources they shouldn't have had access to. The exposure level would depend on the original permission level granted to the affected project role. This vulnerability only affected customers using group based authentication in Rancher. 
| 31 Mar 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3), [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) and [Rancher v2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md index f6e491dde2eb..bb7fed4fe0a6 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -26,7 +26,7 @@ See the [rancher/rancher-cleanup repo](https://github.com/rancher/rancher-cleanu ### Rolling back from v2.6.4+ to lower versions of v2.6.x 1. Follow these [instructions](https://github.com/rancher/rancher-cleanup/blob/main/README.md) to run the scripts. -1. Follow these [instructions](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. +1. Follow these [instructions](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. 1. Omit Step 3. 1. When you reach Step 4, install the Rancher v2.6.x version on the local cluster you intend to roll back to. diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index 129cfde943ab..b4f9fcc166ff 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -8,7 +8,7 @@ title: Enable Prometheus Federator ## Requirements -By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. +By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. For instructions on installing rancher-monitoring, refer to [this page](../enable-monitoring.md). 
diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index b9a655dd913f..72206789fce4 100644 --- a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -12,7 +12,7 @@ The steps for setting up monitoring for workloads depend on whether you want bas If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The resource usage time series data is in Prometheus's local time series database. -Grafana shows the data in aggregate, but you can see the data for the individual workload by using a PromQL query that extracts the data for that workload. Once you have the PromQL query, you can execute the query individually in the Prometheus UI and see the time series visualized there, or you can use the query to customize a Grafana dashboard to display the workload metrics. For examples of PromQL queries for workload metrics, see [this section.](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/expression/#workload-metrics) +Grafana shows the data in aggregate, but you can see the data for the individual workload by using a PromQL query that extracts the data for that workload. Once you have the PromQL query, you can execute the query individually in the Prometheus UI and see the time series visualized there, or you can use the query to customize a Grafana dashboard to display the workload metrics. For examples of PromQL queries for workload metrics, see [this section.](../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md#workload-metrics). To set up custom metrics for your workload, you will need to set up an exporter and create a new ServiceMonitor custom resource to configure Prometheus to scrape metrics from your exporter. diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md index aaa7b2cfea98..d78bde42e1b3 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md @@ -36,7 +36,7 @@ See the [rancher/rancher-cleanup repo](https://github.com/rancher/rancher-cleanu ### Rolling back from v2.6.4+ to lower versions of v2.6.x 1. Follow these [instructions](https://github.com/rancher/rancher-cleanup/blob/main/README.md) to run the scripts. -1. Follow these [instructions](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. +1. Follow these [instructions](./migrate-rancher-to-new-cluster.md) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. 1. Omit Step 3. 1. When you reach Step 4, install the Rancher v2.6.x version on the local cluster you intend to roll back to. 
diff --git a/versioned_docs/version-2.7/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/versioned_docs/version-2.7/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md index 7f641e9d917c..993543cd07f4 100644 --- a/versioned_docs/version-2.7/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md +++ b/versioned_docs/version-2.7/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md @@ -86,7 +86,7 @@ Sometimes it is useful to monitor workloads from the outside. For this, you can If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic to understand how all these workloads are communicating with each other and where a problem or bottleneck may be. -Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click](https://rancher.com/docs/rancher/v2.6/en/istio/) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. +Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click](../../../pages-for-subheaders/istio.md) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. ## Real User Monitoring diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md index 7a11ccdd76bd..8413e1166a0c 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md @@ -26,7 +26,7 @@ This scenario is not officially supported, but is described for cases in which u ::: -If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/air-gap/) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it. +If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it. 
### Private Control Plane Endpoint diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md index 1fa87b8e54ff..c279eccf9786 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md @@ -164,7 +164,7 @@ Additional Kubernetes manifests, managed as an [Add-on](https://kubernetes.io/do ### Agent Environment Vars -Option to set environment variables for [Rancher agents](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. +Option to set environment variables for [Rancher agents](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. ### etcd diff --git a/versioned_docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md b/versioned_docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md index 558f80d7dfc4..872a908f6331 100644 --- a/versioned_docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md +++ b/versioned_docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md @@ -19,10 +19,10 @@ Rancher is committed to informing the community of security issues in our produc | [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where a flaw with authorization logic allows privilege escalation in downstream clusters through cluster role template binding (CRTB) and project role template binding (PRTB). The vulnerability can be exploited by any user who has permissions to create/edit CRTB or PRTB (such as `cluster-owner`, `manage cluster members`, `project-owner`, and `manage project members`) to gain owner permission in another project in the same cluster or in another project on a different downstream cluster. | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | It was discovered that in Rancher versions up to and including 2.5.12 and 2.6.3, there is a failure to properly sanitize credentials in cluster template answers. This failure can lead to plaintext storage and exposure of credentials, passwords, and API tokens. The exposed credentials are visible in Rancher to authenticated `Cluster Owners`, `Cluster Members`, `Project Owners`, and `Project Members` on the endpoints `/v1/management.cattle.io.clusters`, `/v3/clusters`, and `/k8s/clusters/local/apis/management.cattle.io/v3/clusters`. 
| 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where sensitive fields like passwords, API keys, and Rancher's service account token (used to provision clusters) were stored in plaintext directly on Kubernetes objects like `Clusters` (e.g., `cluster.management.cattle.io`). Anyone with read access to those objects in the Kubernetes API could retrieve the plaintext version of those sensitive data. The issue was partially found and reported by Florian Struck (from [Continum AG](https://www.continum.net/)) and [Marco Stuurman](https://github.com/fe-ax) (from [Shock Media B.V.](https://www.shockmedia.nl/)). | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | -| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) Container Network Interface (CNI) when configured through [RKE templates](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | -| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. 
| 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](../../faq/container-network-interface-providers.md#weave) Container Network Interface (CNI) when configured through [RKE templates](../../pages-for-subheaders/about-rke1-templates.md). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | +| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already possess. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources.
| 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.11 and from 2.6.0 up to and including 2.6.2, where an insufficient check of the same-origin policy when downloading Helm charts from a configured private repository can lead to exposure of the repository credentials to a third-party provider. This issue only happens when the user configures access credentials to a private repository in Rancher inside `Apps & Marketplace > Repositories`. The issue was found and reported by Martin Andreas Ullrich. | 14 Apr 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) and [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | | [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | A vulnerability was discovered in versions of Rancher starting 2.0 up to and including 2.6.3. 
The `restricted` pod security policy (PSP) provided in Rancher deviated from the upstream `restricted` policy provided in Kubernetes on account of which Rancher's PSP had `runAsUser` set to `runAsAny`, while upstream had `runAsUser` set to `MustRunAsNonRoot`. This allowed containers to run as any user, including a privileged user (`root`), even when Rancher's `restricted` policy was enforced on a project or at the cluster level. | 31 Mar 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | | [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | A vulnerability was discovered in Rancher versions up to and including 2.4.17, 2.5.11 and 2.6.2. After removing a `Project Role` associated with a group from the project, the bindings that granted access to cluster-scoped resources for those subjects were not deleted. This was due to an incomplete authorization logic check. A user who was a member of the affected group with authenticated access to Rancher could exploit this vulnerability to access resources they shouldn't have had access to. The exposure level would depend on the original permission level granted to the affected project role. This vulnerability only affected customers using group based authentication in Rancher. | 31 Mar 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3), [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) and [Rancher v2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | From 3b43125dba6a96f1e57792afe852f89fee824435 Mon Sep 17 00:00:00 2001 From: vickyhella Date: Fri, 1 Sep 2023 15:28:24 +0800 Subject: [PATCH 32/54] Update Chinese docs --- .../current/contribute-to-rancher.md | 26 +- .../rollbacks.md | 2 +- ...de-kubernetes-without-upgrading-rancher.md | 5 +- .../install-rancher-cis-benchmark.md | 2 +- .../enable-prometheus-federator.md | 4 +- .../set-up-monitoring-for-workloads.md | 2 +- ...up-rancher-launched-kubernetes-clusters.md | 12 +- .../restore-rancher.md | 2 +- .../helm-charts-in-rancher/create-apps.md | 4 - .../configure-out-of-tree-vsphere.md | 2 +- .../migrate-from-in-tree-to-out-of-tree.md | 3 +- .../kubernetes-and-docker-registries.md | 1 + .../ingress-configuration.md | 2 +- .../layer-4-and-layer-7-load-balancing.md | 1 + ...rovision-kubernetes-clusters-in-vsphere.md | 4 +- .../authorized-cluster-endpoint.md | 13 +- .../use-kubectl-and-kubeconfig.md | 7 +- .../manage-clusters/nodes-and-node-pools.md | 9 +- .../built-in-dashboards.md | 6 +- .../pages-for-subheaders/access-clusters.md | 42 +- ...on-permissions-and-global-configuration.md | 34 +- .../create-kubernetes-persistent-storage.md | 1 + .../deploy-apps-across-clusters.md | 1 + .../enable-experimental-features.md | 1 + .../monitoring-and-alerting.md | 9 +- .../quick-start-guides.md | 1 + .../pages-for-subheaders/rancher-security.md | 3 +- .../current/pages-for-subheaders/vsphere.md | 1 + .../backup-configuration.md | 6 + .../monitoring-best-practices.md | 2 +- .../cli-with-rancher/rancher-cli.md | 2 +- .../gke-private-clusters.md | 2 +- .../rke2-cluster-configuration.md | 2 +- ...ssessment-guide-with-cis-v1.7-k8s-v1.25.md | 3148 ++++++++++++++++ ...ssessment-guide-with-cis-v1.7-k8s-v1.25.md | 3085 ++++++++++++++++ ...ssessment-guide-with-cis-v1.7-k8s-v1.25.md | 3196 +++++++++++++++++ .../security-advisories-and-cves.md | 6 +- .../version-2.6/contribute-to-rancher.md | 26 +- .../configure-out-of-tree-vsphere.md | 2 +- .../migrate-from-in-tree-to-out-of-tree.md | 
3 +- .../kubernetes-and-docker-registries.md | 1 + .../ingress-configuration.md | 3 +- .../layer-4-and-layer-7-load-balancing.md | 1 + .../create-kubernetes-persistent-storage.md | 1 + .../deploy-apps-across-clusters.md | 1 + .../enable-experimental-features.md | 1 + .../quick-start-guides.md | 1 + .../pages-for-subheaders/vsphere.md | 1 + .../version-2.7/contribute-to-rancher.md | 26 +- .../install-rancher-cis-benchmark.md | 2 +- .../configure-out-of-tree-vsphere.md | 2 +- .../migrate-from-in-tree-to-out-of-tree.md | 3 +- .../kubernetes-and-docker-registries.md | 1 + .../ingress-configuration.md | 3 +- .../layer-4-and-layer-7-load-balancing.md | 1 + .../create-kubernetes-persistent-storage.md | 1 + .../deploy-apps-across-clusters.md | 1 + .../enable-experimental-features.md | 1 + .../quick-start-guides.md | 1 + .../pages-for-subheaders/rancher-security.md | 3 +- .../pages-for-subheaders/vsphere.md | 1 + .../cli-with-rancher/rancher-cli.md | 2 +- .../node-template-configuration/vsphere.md | 4 +- 63 files changed, 9643 insertions(+), 99 deletions(-) create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/contribute-to-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/contribute-to-rancher.md index f63c6a38e301..baff7b735b9f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/contribute-to-rancher.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: 参与 Rancher 社区贡献 --- -本节介绍 Rancher 使用的仓库、如何构建仓库以及提交 issue 时要包含的信息。 +本文介绍了 Rancher 仓库和 Rancher 文档、如何构建 Rancher 仓库以及提交 issue 时要包含哪些信息。 有关如何为 Rancher 项目开发做出贡献的更多详细信息,请参阅 [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki)。Wiki 包含以下主题的资源: @@ -14,7 +14,15 @@ title: 参与 Rancher 社区贡献 在 Rancher Users Slack 上,开发者的频道是 **#developer**。 -## 仓库 +## Rancher 文档 + +如果你对此网站上的文档有建议,请在主 [Rancher 文档](https://github.com/rancher/rancher-docs)仓库中[提交 issue](https://github.com/rancher/rancher-docs/issues/new/choose)。此仓库包含 Rancher v2.0 及更高版本的文档。 + +有关贡献和构建 Rancher v2.x 文档仓库的更多详细信息,请参阅 [Rancher 文档 README](https://github.com/rancher/rancher-docs#readme)。 + +有关 Rancher v1.6 及更早版本的文档,请参阅 [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) 仓库,其中包含 https://rancher.com/docs/rancher/v1.6/en/ 的源文件。 + +## Rancher 仓库 所有仓库都位于我们的主要 GitHub 组织内。Rancher 使用了很多仓库,以下是部分主要仓库的描述: @@ -38,19 +46,19 @@ title: 参与 Rancher 社区贡献 ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
用于配置/管理 Kubernetes 集群的 Rancher 组件。 -## 构建 +### 构建 Rancher 仓库 每个仓库都应该有一个 Makefile,并且可以使用 `make` 命令进行构建。`make` 目标基于仓库中 `/scripts` 目录中的脚本,每个目标都使用 [Dapper](https://github.com/rancher/dapper) 在孤立的环境中运行。`Dockerfile.dapper` 将用于此操作,它包含了所需的所有构建工具。 默认目标是 `ci`,它将运行 `./scripts/validate`、`./scripts/build`、`./scripts/test ` 和 `./scripts/package`。生成的二进制文件将在 `./build/bin` 中,通常也打包在 Docker 镜像中。 -## Bug、Issue 和疑问 +### Rancher Bug、Issue 或疑问 如果你发现任何 bug 或问题,由于有人可能遇到过同样的问题,或者我们已经正在寻找解决方案,因此请先在[已报告 issue](https://github.com/rancher/rancher/issues) 中搜索。 如果找不到与你的问题相关的内容,请通过[提出 issue](https://github.com/rancher/rancher/issues/new) 与我们联系。与 Rancher 相关的仓库有很多,但请将 issue 提交到 Rancher 仓库中,这样能确保我们能看到这些 issue。如果你想就一个用例提出问题或询问其他用户,你可以在 [Rancher 论坛](https://forums.rancher.com)上发帖。 -### 提交 Issue 的检查清单 +#### 提交 Issue 的检查清单 提交问题时请遵循此清单,以便我们调查和解决问题。如果你能提供更多信息,我们就可以使用更多数据来确定导致问题的原因或发现更多相关的内容。 @@ -126,11 +134,3 @@ title: 参与 Rancher 社区贡献 - Docker Daemon 日志记录(可能并不全部存在,取决于操作系统) - `/var/log/docker.log` - **指标**:如果你遇到性能问题,请提供尽可能多的指标数据(文件或屏幕截图)来帮助我们确定问题。如果你遇到主机相关的问题,你可以提供 `top`、`free -m`、`df` 的输出,这些输出会显示进程/内存/磁盘的使用情况。 - -## 文档 - -如果你对我们的文档有修改意见,请在我们的文档仓库中提交 PR。 - -- [Rancher 2.x 文档仓库](https://github.com/rancher/docs):Rancher 2.x 所有文档都在这个仓库中。具体位于仓库的 `content` 文件夹中。 - -- [Rancher 1.x 文档仓库](https://github.com/rancher/rancher.github.io):Rancher 1.x 所有文档都在这个仓库中。具体位于仓库的 `rancher` 文件夹中。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md index 5570d3c47536..c306729f752d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -22,7 +22,7 @@ Rancher v2.6.4 将 cluster-api 模块从 v0.4.4 升级到 v1.0.2。反过来,c ### 从 v2.6.4+ 回滚到较低版本的 v2.6.x 1. 按照[说明](https://github.com/rancher/rancher-cleanup/blob/main/README.md)运行脚本。 -1. 按照[说明](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/)在现有集群上安装 rancher-backup Helm Chart 并恢复之前的状态。 +1. 按照[说明](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)在现有集群上安装 rancher-backup Helm Chart 并恢复之前的状态。 1. 省略步骤 3。 1. 执行到步骤 4 时,在要回滚到的 local 集群上安装 Rancher 2.6.x 版本。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index d8021a77f4fd..2d9c0f009183 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -80,7 +80,10 @@ Rancher Server 会定期刷新 `rke-metadata-config` 来下载新的 Kubernetes 在将新的 Kubernetes 版本加载到 Rancher Server 中之后,需要执行其他步骤才能使用它们启动集群。Rancher 需要访问更新的系统镜像。虽然只有管理员可以更改元数据设置,但任何用户都可以下载 Rancher 系统镜像并为镜像准备私有容器镜像仓库。 -1. 要把系统镜像下载到私有镜像仓库,请单击 Rancher UI 左下角的 Rancher Server 版本。 +要下载私有镜像仓库的系统镜像: + +1. 点击左上角的 **☰**。 +1. 点击左侧导航底部的**简介**。 1. 下载适用于 Linux 或 Windows 操作系统的镜像。 1. 下载 `rancher-images.txt`。 1. 
使用[离线环境安装](other-installation-methods/air-gapped-helm-cli-install/publish-images.md)时使用的步骤准备私有镜像仓库,但不要使用发布页面中的 `rancher-images.txt`,而是使用上一个步骤中获取的文件。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 541453845e10..814d38c5349e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -12,6 +12,6 @@ title: 安装 Rancher CIS Benchmark :::note -CIS Benchmark 4.0.0 及更高版本默认禁用 PSP。要在加固集群上安装 CIS Benchmark,在安装 Chart 之前将 values 中的 `global.psp.enabled` 设置为 `true`。 +如果你使用 Kubernetes v1.24 或更早版本,并且具有使用 [Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) (PSP) 加固的集群,则 CIS Benchmark 4.0.0 及更高版本会默认禁用 PSP。要在 PSP 加固集群上安装 CIS Benchmark,请在安装 Chart 之前将 values 中的 `global.psp.enabled` 设置为 `true`。[Pod 安全准入](../../new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md) (PSA) 加固集群不受影响。 ::: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md index bf3b78cac723..c6bac8399718 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -4,7 +4,7 @@ title: 启用 Prometheus Federator ## 要求 -默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。 +默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。 有关安装 rancher-monitoring 的说明,请参阅[此页面](../enable-monitoring.md)。 @@ -75,7 +75,7 @@ matchLabels: 1. 单击 **Prometheus Federator** Chart。 1. 单击**安装**。 1. 在**元数据**页面,点击**下一步**。 -1. 在**项目 Release 命名空间项目 ID** 字段中,`System 项目`是默认值,但你可以使用具有类似[有限访问权限](#确保-cattle-monitoring-system-命名空间位于-system-项目中或者位于一个锁定并能访问集群中其他项目的项目中)的另一个项目覆盖它。你可以在 local 上游集群中运行以下命令来找到项目 ID: +1. 
在**命名空间** > **项目 Release 命名空间项目 ID** 字段中,`System 项目`是默认值,但你可以使用具有类似[有限访问权限](#确保-cattle-monitoring-system-命名空间位于-system-项目中或者位于一个锁定并能访问集群中其他项目的项目中)的另一个项目覆盖它。你可以在 local 上游集群中运行以下命令来找到项目 ID: ```plain kubectl get projects -A -o custom-columns="NAMESPACE":.metadata.namespace,"ID":.metadata.name,"NAME":.spec.displayName diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md index 9b8fd1582c83..7ab5dd36f798 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md @@ -8,7 +8,7 @@ title: 为工作负载设置 Monitoring 如果你只需要工作负载的 CPU 和内存时间序列,则不需要部署 ServiceMonitor 或 PodMonitor,因为 Monitoring 应用默认会收集资源使用情况的指标数据。资源使用的时间序列数据在 Prometheus 的本地时间序列数据库中。 -Grafana 显示聚合数据,你也可以使用 PromQL 查询来查看单个工作负载的数据。进行 PromQL 查询后,你可以在 Prometheus UI 中单独执行查询并查看可视化的时间序列,你也可以使用查询来自定义显示工作负载指标的 Grafana 仪表板。有关工作负载指标的 PromQL 查询示例,请参阅[本节](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/expression/#workload-metrics)。 +Grafana 显示聚合数据,你也可以使用 PromQL 查询来查看单个工作负载的数据。进行 PromQL 查询后,你可以在 Prometheus UI 中单独执行查询并查看可视化的时间序列,你也可以使用查询来自定义显示工作负载指标的 Grafana 仪表板。有关工作负载指标的 PromQL 查询示例,请参阅[本节](../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md#工作负载指标)。 要为你的工作负载设置自定义指标,你需要设置一个 Exporter 并创建一个新的 ServiceMonitor 自定义资源,从而将 Prometheus 配置为从 Exporter 中抓取指标。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md index 11b0dc2a9fb4..3ba17f6ed643 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -256,11 +256,19 @@ Rancher 支持两种不同的备份目标: ### S3 备份目标 -`S3` 备份目标允许用户配置与 S3 兼容的后端来存储快照。此选项的主要好处是,如果集群丢失了所有 etcd 节点,由于快照存储在外部,集群仍然可以恢复。Rancher 推荐使用 `S3` 备份这类外部目标。但是它的配置要求进行额外的操作,这也应该被考虑在其中。由于 Rancher 会为群集配置的 S3 存储桶/文件夹中列出的任何可用快照填充快照信息,因此建议你确保每个集群都具有唯一的存储桶或文件夹。 +我们建议你使用 `S3` 备份目标。你可以将快照存储在外部 S3 兼容的后端上。由于快照不存储在本地,因此即使丢失所有 etcd 节点,你仍然可以还原集群。 + +虽然 `S3` 比本地备份具有优势,但它需要额外的配置。 + +:::caution + +如果你使用 S3 备份目标,请确保每个集群都有自己的存储桶或文件夹。Rancher 将使用集群配置的 S3 存储桶或文件夹中的可用快照来填充快照信息。 + +::: | 选项 | 描述 | 必填 | |---|---|---| -| S3 存储桶名称 | 将存储备份的 S3 存储桶名称 | * | +| S3 存储桶名称 | 用于存储备份的 S3 存储桶名称 | * | | S3 区域 | 备份存储桶的 S3 区域 | | | S3 区域端点 | 备份存储桶的 S3 区域端点 | * | | S3 访问密钥 | 有权访问备份存储桶的 S3 访问密钥 | * | diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md index ea3dafca61e1..11760970b23b 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md @@ -32,7 +32,7 @@ Rancher v2.6.4 将 cluster-api 模块从 v0.4.4 升级到 v1.0.2。反过来,c ### 从 v2.6.4+ 回滚到较低版本的 v2.6.x 1. 按照[说明](https://github.com/rancher/rancher-cleanup/blob/main/README.md)运行脚本。 -1. 按照[说明](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/)在现有集群上安装 rancher-backup Helm Chart 并恢复之前的状态。 +1. 按照[说明](../backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)在现有集群上安装 rancher-backup Helm Chart 并恢复之前的状态。 1. 省略步骤 3。 1. 执行到步骤 4 时,在要回滚到的 local 集群上安装 Rancher 2.6.x 版本。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md index 554725a2e8ed..b6605e519659 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md @@ -2,16 +2,12 @@ title: 创建应用 --- -Rancher 的应用市场基于 Helm 仓库和 Helm Chart。你可以添加基于 HTTP 的标准 Helm 仓库以及任何包含 Chart 的 Git 仓库。 - :::tip 有关开发 Chart 的完整演示,请参阅 Helm 官方文档中的 [Chart 模板开发者指南](https://helm.sh/docs/chart_template_guide/)。 ::: - - ## Chart 类型 Rancher 支持两种不同类型的 Chart,分别是 Helm Chart 和 Rancher Chart。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md index b1f584639a09..f0461eb27b58 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md @@ -4,7 +4,7 @@ title: 配置树外 vSphere 云提供商 Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。 -本页介绍如何在启动集群后安装 Cloud Provider Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 +本文介绍了如何在设置集群后安装 Cloud Provider Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 ## 先决条件 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md index 6d0b408f1dd0..065ff2deb842 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md @@ -1,6 +1,7 @@ --- -title: 将 vSphere 树内卷迁移到 CSI +title: 将 vSphere 树内卷迁移到树外 --- + Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 
云提供商和云存储插件来使用。 本页介绍如何从树内 vSphere 云提供商迁移到树外,以及如何在迁移后管理现有虚拟机。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index 9d59762ad3be..6231ec1697a5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -2,6 +2,7 @@ title: Kubernetes 镜像仓库和容器镜像仓库 description: 了解容器镜像仓库和 Kubernetes 镜像仓库、它们的用例以及如何在 Rancher UI 中使用私有镜像仓库 --- + 镜像仓库是 Kubernetes 密文(Secret),包含用于向[私有容器镜像仓库](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)进行身份验证的凭证。 “Registry” 这个词可能有两种意思,可指代容器或 Kubernetes 镜像仓库: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md index a7e41c4488f5..3ff047bc108f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md @@ -7,7 +7,7 @@ description: Ingress 配置 在 Kubernetes v1.21 及更高版本中,NGINX Ingress Controller 不再默认运行在 hostNetwork 中。它改为将 hostPorts 用于端口 80 和端口 443,因此你可以将准入 Webhook 配置为只能通过 ClusterIP 访问。这确保了只能从集群内部访问 webhook。 -由于 controller 的这一更改,默认不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。 +由于 controller 的这一更改,默认 RKE1 配置不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。为此,请[编辑](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#使用-yaml-编辑集群)集群的 YAML 并按照[官方 RKE1 文档](https://rke.docs.rancher.com/config-options/add-ons/ingress-controllers#configuring-network-options)中的步骤操作。 ::: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 541a5f0f0a3a..c13f8dbf75e6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -2,6 +2,7 @@ title: "四层和七层负载均衡" description: "Kubernetes 支持四层负载均衡和七层负载均衡。了解对不同 deployment 的支持" --- + Kubernetes 支持四层负载均衡和七层负载均衡。 ## 四层负载均衡器 diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md index 25e4cf7e0870..806090bff7c1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md @@ -48,8 +48,6 @@ title: 在 vSphere 中配置 Kubernetes 集群 ## 创建 vSphere 集群 -在 Rancher 中创建 vSphere 集群的操作取决于 Rancher 的版本。 - 1. [创建云凭证](#1-创建云凭证) 2. [使用云凭证创建节点模板](#2-使用云凭证创建节点模板) 3. [使用节点模板创建具有节点池的集群](#3-使用节点模板创建具有节点池的集群) @@ -109,4 +107,4 @@ title: 在 vSphere 中配置 Kubernetes 集群 - **通过 kubectl CLI 访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#在工作站使用-kubectl-访问集群)在你的工作站上使用 kubectl 访问集群。在这种情况下,你将通过 Rancher Server 的身份验证代理进行身份验证,然后 Rancher 会让你连接到下游集群。此方法允许你在没有 Rancher UI 的情况下管理集群。 - **通过 kubectl CLI 使用授权的集群端点访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)直接使用 kubectl 访问集群,而无需通过 Rancher 进行身份验证。我们建议设置此替代方法来访问集群,以便在无法连接到 Rancher 时访问集群。 -- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../../../pages-for-subheaders/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。 \ No newline at end of file +- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../../../pages-for-subheaders/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md index dee43a454589..364a4012eee0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -4,11 +4,18 @@ title: 授权集群端点的工作原理 本文介绍 kubectl CLI、kubeconfig 文件和授权集群端点如何协同工作,使你可以直接访问下游 Kubernetes 集群,而无需通过 Rancher Server 进行身份验证。本文旨在为[设置 kubectl 以直接访问集群的说明](use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)提供背景信息和上下文。 -### kubeconfig 文件说明 +### Kubeconfig 文件说明 -_kubeconfig 文件_ 是与 
kubectl 命令行工具(或其他客户端)结合使用时用于配置 Kubernetes 访问的文件。 +kubeconfig 文件是与 kubectl 命令行工具(或其他客户端)结合使用时用于配置 Kubernetes 访问的文件。 -此 kubeconfig 文件及其内容特定于你正在查看的集群。你可以从 Rancher 的**集群**视图中下载该文件。在 Rancher 中可以访问的每个集群都需要一个单独的 kubeconfig 文件。 +kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**集群**页面进行下载: + +1. 点击左上角的 **☰**。 +1. 选择**集群管理**。 +1. 找到要下载其 kubeconfig 的集群,然后选择行末尾的 **⁝**。 +1. 从子菜单中选择**下载 KubeConfig**。 + +在 Rancher 中可以访问的每个集群都需要一个单独的 kubeconfig 文件。 下载 kubeconfig 文件后,你将能够使用 kubeconfig 文件及其 Kubernetes [上下文](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration)访问下游集群。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md index c28853ad3e4e..ac6a4a338fb6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -27,9 +27,10 @@ description: "了解如何通过 kubectl Shell 使用 kubectl,或通过 kubect ::: -1. 登录到 Rancher。点击 **☰ > 集群管理**。 -1. 转到要使用 kubectl 访问的集群,然后单击 **Explore**。 -1. 在顶部导航栏中,单击**下载 KubeConfig** 按钮。 +1. 点击左上角的 **☰**。 +1. 选择**集群管理**。 +1. 找到要下载其 kubeconfig 的集群,然后选择行末尾的 **⁝**。 +1. 从子菜单中选择**下载 KubeConfig**。 1. 将 YAML 文件保存在本地计算机上。将文件移动到 `~/.kube/config`。注意:kubectl 用于 kubeconfig 文件的默认位置是 `~/.kube/config`。但是你也可以运行类似以下的命令,使用 `--kubeconfig` 标志指定任何其他目录: ``` kubectl --kubeconfig /custom/path/kube.config get pods diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md index 2174397f7cec..a942534d1c68 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md @@ -2,7 +2,14 @@ title: 节点和节点池 --- -在 Rancher 中启动 Kubernetes 集群后,你可以从集群的**节点**选项卡管理各个节点。不同的配置集群[选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。 +在 Rancher 中启动 Kubernetes 集群后,你可以从集群的**节点**选项卡管理各个节点。 + +1. 点击左上角的 **☰**。 +1. 选择**集群管理**。 +1. 找到要管理其节点的集群,然后单击行末尾的**浏览**按钮。 +1. 
从左侧导航中选择**节点**。 + +不同的集群配置[选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。 :::note diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index 0161de45166e..5dd50402e171 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -110,4 +110,8 @@ Alertmanager UI 可让你查看最近触发的告警。 ![PrometheusRules UI](/img/prometheus-rules-ui.png) -有关在 Rancher 中配置 PrometheusRule 的更多信息,请参阅[此页面](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md)。 \ No newline at end of file +有关在 Rancher 中配置 PrometheusRule 的更多信息,请参阅[此页面](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md)。 + +## 旧版 UI + +有关在引入 `rancher-monitoring` 应用程序之前 Rancher v2.2 到 v2.4 中可用仪表板的信息,请参阅 [Rancher v2.0—v2.4 文档](/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md index 48e93b6f4472..4860f8a2a830 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md @@ -10,22 +10,52 @@ title: 集群访问 有关如何设置身份验证系统的信息,请参阅[本节](authentication-config.md)。 +## Rancher UI 中的集群 -### Rancher UI +我们提供了多种通过 Rancher UI 查看和管理集群的方法。 -Rancher 提供了一个直观的用户界面来让你与集群进行交互。UI 中所有可用的选项都使用 Rancher API。因此,UI 中的任何操作都可以在 Rancher CLI 或 Rancher API 中进行。 +### 集群页面 -### kubectl +从 **☰** 菜单访问**集群**页面: + +1. 单击 **☰**。 +1. 选择**集群管理**。 + +你还可以通过单击 Rancher UI **主页**集群表格上方的**管理**按钮来访问**集群**页面。 + +在**集群**页面上,选择每行末尾的 **⁝** 以查看包含以下选项的子菜单: + +* [Kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) +* 下载 KubeConfig +* 将 KubeConfig 复制到剪切板 +* 编辑配置 +* 查看 YAML +* 下载 YAML + +### 集群仪表板 + +在**集群**页面上,选择每行末尾的**浏览**按钮查看该集群的**集群仪表板**。你还可以通过单击表中集群的名称,然后单击**集群**页面上的**浏览**按钮来查看仪表板。 + +也可以通过单击集群名称从 Rancher UI **主页**访问**集群仪表板**。 + +你还可以从顶部导航栏中的 **☰** 访问**集群仪表板**: + +1. 单击 **☰**。 +1. 
从**浏览集群**菜单中选择集群的名称。 + +**集群仪表板**列出了集群相关的信息,例如节点数量、内存使用情况、事件和资源。 + +## kubectl 你可以使用 Kubernetes 命令行工具 [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) 来管理你的集群。使用 kubectl 有两种选择: - **Rancher kubectl shell**:通过启动 Rancher UI 中可用的 kubectl shell 与集群交互。此选项不需要你进行任何配置操作。有关详细信息,请参阅[使用 kubectl Shell 访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md)。 - **终端远程连接**:你也可以通过在本地桌面上安装 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/),然后将集群的 kubeconfig 文件复制到本地 `~/.kube/config` 目录来与集群交互。有关更多信息,请参阅[使用 kubectl 和 kubeconfig 文件访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md)。 -### Rancher CLI +## Rancher CLI 你可以下载 Rancher 自己的命令行工具 [Rancher CLI](cli-with-rancher.md) 来控制你的集群。这个 CLI 工具可以直接与不同的集群和项目进行交互,或者向它们传递 `kubectl` 命令。 -### Rancher API +## Rancher API -最后,你可以通过 Rancher API 与集群进行交互。在使用 API 之前,你必须先获取 [API 密钥](../reference-guides/user-settings/api-keys.md)。要查看 API 对象的不同资源字段和操作,请打开 API UI(API UI 可以通过单击 Rancher UI 对象的**在 API 中查看**访问)。 \ No newline at end of file +最后,你可以通过 Rancher API 与集群进行交互。在使用 API 之前,你必须先获取 [API 密钥](../reference-guides/user-settings/api-keys.md)。要查看 API 对象的不同资源字段和操作,请打开 API UI(API UI 可以通过单击 Rancher UI 对象的**在 API 中查看**访问)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md index 8d6758e8dcbb..ff9a4f49504f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -1,5 +1,5 @@ --- -title: 身份验证、权限和全局配置 +title: 身份验证、权限和全局设置 --- 安装完成后,[系统管理员](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)需要配置 Rancher 来配置身份验证,安全,默认设定,安全策略,驱动和全局 DNS 条目。 @@ -48,6 +48,34 @@ Rancher 用于配置 [RKE 集群](launch-kubernetes-with-rancher.md)的 Kubernet 有关元数据如何工作以及如何配置元数据,请参见 [Rancher Kubernetes 元数据](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md)。 -## 启用实验功能 +## 全局设置 -Rancher 包含一些默认关闭的实验功能。我们引入了功能开关,让你试用这些新功能。详情请参见[功能开关](enable-experimental-features.md)的章节。 +顶部导航栏中提供了控制全局级别 Rancher 设置的选项。 + +点击左上角的 **☰**,然后选择**全局设置**来查看并进行配置: + +- **设置**:各种 Rancher 默认值,例如用户密码的最小长度 (`password-min-length`)。需要小心修改这些设置,因为无效的值可能会破坏 Rancher 安装。 +- **功能开关**:打开或关闭的 Rancher 功能。其中一些是[实验功能](#启用实验功能)。 +- **横幅**:可以添加到门户上固定位置的元素。例如,你可以使用这些选项在用户登录 Rancher 时[设置自定义横幅](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md#固定横幅)。 +- **品牌**:可以[自定义](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md)的 Rancher UI 设计元素。你可以添加自定义徽标或图标,并修改 UI 颜色。 +- **性能**:Rancher UI 的性能设置,例如增量资源加载。 +- **主页链接**:Rancher UI **主页**上显示的链接。你可以修改默认链接的可见性或添加你自己的链接。 + +### 启用实验功能 + +Rancher 包含一些实验性或默认禁用的功能。你可以使用功能开关来启用这些功能。详情请参见[功能开关](enable-experimental-features.md)的章节。 + +### 全局设置 + +除非你激活了**旧版**[功能开关](enable-experimental-features.md),否则**全局配置**选项不可见。v2.6 及更高版本的 Rancher 默认禁用 **legacy** 标志。如果你从旧 Rancher 版本升级,或者在 Rancher v2.6 及更高版本上激活了 **legacy** 功能开关,则可以从顶部导航菜单访问**全局设置**: + +1. 点击左上角的 **☰**。 +1. 
从**旧版应用**中选择**全局设置**。 + +**全局设置**提供了以下功能: + +- **应用商店** +- **全局 DNS 条目** +- **全局 DNS 提供商** + +由于这些是旧版功能,因此请参阅有关[应用商店](/versioned_docs/v2.0-v2.4/pages-for-subheaders/helm-charts-in-rancher.md)、[全局 DNS 条目](/versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry)和[全局 DNS 提供商](/versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider)的 Rancher v2.0-v2.4 文档了解更多详情。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/create-kubernetes-persistent-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/create-kubernetes-persistent-storage.md index 91ddc280e7e3..e59d36c843d4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,7 @@ title: "Kubernetes 持久存储:卷和存储类" description: "了解在 Kubernetes 中创建持久存储的两种方式:持久卷和存储类" --- + 在部署需要保​​留数据的应用时,你需要创建持久存储。持久存储允许你在运行应用的 pod 之外存储应用数据。即使运行应用的 pod 发生故障,这种存储方式也能让你保留应用数据。 本文假设你已了解 Kubernetes 的持久卷、持久卷声明和存储类的概念。如需更多信息,请参阅[存储的工作原理](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md index c349bcba4d91..5a6382f71b1b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,7 @@ --- title: 跨集群部署应用 --- + ### Fleet Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/enable-experimental-features.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/enable-experimental-features.md index 3c47fcf0cf17..4f759f616fec 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/enable-experimental-features.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,7 @@ --- title: 启用实验功能 --- + Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当你认为使用[不支持的存储类型](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)的好处大于使用未经测试的功能的风险时,你可能想要启用实验功能。为了让你能够试用这些默认关闭的功能,我们引入了功能开关(feature flag)。 实验功能可以通过以下三种方式启用: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md index 71ac5e0ec938..11597dd034dc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md @@ -3,8 +3,13 @@ title: 监控和告警 description: Prometheus 允许你查看来自不同 Rancher 和 Kubernetes 对象的指标。了解监控范围以及如何启用集群监控 --- -你可以使用 `rancher-monitoring` 应用,将业界领先的开源监控和告警解决方案快速部署到你的集群中。 +`rancher-monitoring` 应用可以快速将领先的开源监控和告警解决方案部署到你的集群上。 + +该应用程序在 Rancher v2.5 中引入,由 
[Prometheus](https://prometheus.io/)、[Grafana](https://grafana.com/grafana/)、[Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/)、[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 和 [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) 提供支持。 +有关 Rancher v2.2 至 v2.4 中 V1 monitoring 和 alerting 的信息，请参阅有关[集群监控](/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md)、[告警](/versioned_docs/v2.0-v2.4/pages-for-subheaders/cluster-alerts.md)、[notifiers](/versioned_docs/v2.0-v2.4/explanations/integrations-in-rancher/notifiers) 和其他[工具](/versioned_docs/v2.0-v2.4/pages-for-subheaders/project-tools.md)的 Rancher v2.0-v2.4 文档。 + +你可以使用 `rancher-monitoring` 应用，将业界领先的开源监控和告警解决方案快速部署到你的集群中。 ### 功能 @@ -12,8 +17,6 @@ Prometheus 支持查看 Rancher 和 Kubernetes 对象的指标。通过使用时 通过查看 Prometheus 从集群的 controlplane、节点和 deployment 中抓取的数据，你可以随时了解集群中发生的所有事件。然后，你可以使用这些分析来更好地运行你的环境，例如在系统紧急情况发生之前阻止它们、制定维护策略，或恢复崩溃的服务器。 -在 Rancher v2.5 中引入的 `rancher-monitoring` operator 由 [Prometheus](https://prometheus.io/)、[Grafana](https://grafana.com/grafana/)、[Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 和 [Prometheus adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) 提供支持。 - Monitoring 应用: - 监控集群节点、Kubernetes 组件和软件部署的状态和进程。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/quick-start-guides.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/quick-start-guides.md index 46359aab1ade..fd0013e07778 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/quick-start-guides.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,7 @@ --- title: Rancher 部署快速入门指南 --- + :::caution 本章节中提供的指南，旨在帮助你快速启动一个用于 Rancher 的沙盒，以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导，请参见[安装](installation-and-upgrade.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-security.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-security.md index 542ddb1db768..15677f4a9a76 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-security.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/rancher-security.md @@ -22,9 +22,8 @@ title: 安全 安全是 Rancher 全部功能的基础。Rancher 集成了全部主流身份验证工具和服务，并提供了企业级的 [RBAC 功能](manage-role-based-access-control-rbac.md)，让你的 Kubernetes 集群更加安全。 本文介绍了安全相关的文档以及资源，让你的 Rancher 安装和下游 Kubernetes 集群更加安全。 -### NeuVector 与 Rancher 的集成 -_2.6.5 的新功能_ +### NeuVector 与 Rancher 的集成 NeuVector 是一个开源的、以容器为中心的安全应用程序，现已集成到 Rancher 中。NeuVector 提供生产安全、DevOps 漏洞保护和容器防火墙等功能。请参阅 [Rancher 文档](../integrations-in-rancher/neuvector.md)和 [NeuVector 文档](https://open-docs.neuvector.com/)了解更多信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/vsphere.md index 888b5d45a3e1..db206b47f965 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/vsphere.md @@ -2,6 +2,7 @@ title: 创建 vSphere 集群 description: 使用 Rancher 创建 vSphere 集群。集群可能包括具有不同属性的 VM 组，这些属性可用于细粒度控制节点的大小。 --- + import YouTube from '@site/src/components/YouTube' 你可以结合使用 Rancher 与 vSphere，从而在本地体验云环境的操作。 diff --git
a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/backup-restore-configuration/backup-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/backup-restore-configuration/backup-configuration.md index 24505753d866..005e93110aa9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/backup-restore-configuration/backup-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/backup-restore-configuration/backup-configuration.md @@ -68,6 +68,12 @@ kubectl create secret generic encryptionconfig \ ### S3 +:::caution + +如果你使用 S3 备份目标,请确保每个集群都有自己的存储桶或文件夹。Rancher 将使用集群配置的 S3 存储桶或文件夹中的可用快照来填充快照信息。 + +::: + S3 存储位置包含以下配置字段: 1. **凭证密文**(可选):如果你需要使用 AWS 访问密钥(access key)和密文密钥(secret key)来访问 S3 存储桶,请使用带有密钥和指令 `accessKey` 和 `secretKey` 的凭证来创建密文。它可以是任意一个命名空间。你可以点击[此处](#credentialsecret-示例)查看示例密文。如果运行 operator 的节点在 EC2 中,并且设置了允许它们访问 S3 的 IAM 权限,则此指令是不必要的(如[本节](#ec2-节点访问-s3-的-iam-权限)所述)。凭证密文下拉菜单列出了所有命名空间的密文。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md index d880b03bd0ba..c48a1d04995e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md @@ -86,7 +86,7 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。 如果你有一个(微)服务架构,在该架构中集群的多个单独的工作负载相互通信,那么拥有这些流量的详细指标和跟踪是非常重要的,因为这可以帮助你了解所有这些工作负载之间的通信方式,以及问题或瓶颈可能出现的地方。 -当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](https://rancher.com/docs/rancher/v2.6/en/istio/)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。 +当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](../../../pages-for-subheaders/istio.md)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。 ## 真实用户监控 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md index 03c9137205ef..4cf442380040 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md @@ -10,7 +10,7 @@ Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher 你可以直接 UI 下载二进制文件。 1. 点击左上角的 **☰**。 -1. 单击底部的 **v2.6.x**,**v2.6.x** 是一个超链接文本,表示已安装的 Rancher 版本。 +1. 在导航侧边栏菜单底部,单击**简介**。 1. 
在 **CLI 下载**中,有 Windows、Mac 和 Linux 的二进制文件下载链接。你还可以访问我们的 CLI [发布页面](https://github.com/rancher/cli/releases)直接下载二进制文件。 ### 要求 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md index ff4638fc589e..fa562ae9cee1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md @@ -26,7 +26,7 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。 ::: -如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/air-gap/),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。 +如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。 ### 私有 controlplane 端点 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md index c1771ed32d3f..3f4328512ae3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md @@ -160,7 +160,7 @@ Rancher 与以下开箱即用的网络提供商兼容: ### Agent 环境变量 -为 [Rancher agent](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/) 设置环境变量的选项。你可以使用键值对设置环境变量。有关详细信息,请参阅 [RKE2 文档](https://docs.rke2.io/reference/linux_agent_config)。 +为 [Rancher agent](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md) 设置环境变量的选项。你可以使用键值对设置环境变量。有关详细信息,请参阅 [RKE2 文档](https://docs.rke2.io/reference/linux_agent_config)。 ### etcd diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md new file mode 100644 index 000000000000..16614a7e5205 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md @@ -0,0 +1,3148 @@ +--- +title: K3s Self-Assessment Guide - CIS Benchmark v1.7 - K8s v1.25 +--- + +This document is a companion to the [K3s Hardening Guide](../../../../pages-for-subheaders/k3s-hardening-guide.md), which provides prescriptive guidance on how to harden K3s clusters that are running in production and managed by Rancher. 
This benchmark guide helps you evaluate the security of a hardened cluster against each control in the CIS Kubernetes Benchmark. + +This guide corresponds to the following versions of Rancher, CIS Benchmarks, and Kubernetes: + +| Rancher Version | CIS Benchmark Version | Kubernetes Version | +|-----------------|-----------------------|--------------------| +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 | + +This document is for Rancher operators, security teams, auditors and decision makers. + +For more information about each control, including detailed descriptions and remediations for failing tests, refer to the corresponding section of the CIS Kubernetes Benchmark v1.7. You can download the benchmark, after creating a free account, at [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). + +## Testing Methodology + +Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide. + +Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing. + +These are the possible results for each control: + +- **Pass** - The K3s cluster passes the audit outlined in the benchmark. +- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section explains why. +- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use-case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s doesn't prevent their implementation, but no further configuration or auditing of the cluster has been performed. + +This guide makes the assumption that K3s is running as a Systemd unit. Your installation may vary. Adjust the "audit" commands to fit your scenario. + +:::note + +This guide only covers `automated` (previously called `scored`) tests. + +::: + +### Controls + +## 1.1 Control Plane Node Configuration Files +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the +control plane node. +For example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 /etc/kubernetes/manifests/kube-controller-manager.yaml + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 /etc/kubernetes/manifests/kube-scheduler.yaml + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chown root:root /etc/kubernetes/manifests/kube-scheduler.yaml + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 /etc/kubernetes/manifests/etcd.yaml + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root /etc/kubernetes/manifests/etcd.yaml + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). 
For example, +chmod 700 /var/lib/etcd + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 1.1.11 +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** Not Applicable + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, chown root:root /etc/kubernetes/admin.conf + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 scheduler + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root scheduler + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 controllermanager + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root controllermanager + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/tls +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit:** + +```bash +find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root root:root +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + +**Audit:** + +```bash +stat -c %n %a /var/lib/rancher/k3s/server/tls/*.crt +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 600 /etc/kubernetes/pki/*.key + +**Audit:** + +```bash +stat -c %n %a /var/lib/rancher/k3s/server/tls/*.key +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' +``` + +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the --token-auth-file= parameter. 
+ +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the `DenyServiceExternalIPs` +from enabled admission plugins. 
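+
+The upstream CIS remediation text above refers to `/etc/kubernetes/manifests/kube-apiserver.yaml`, which does not exist on a K3s node because K3s does not run the control plane from static pod manifests. As a minimal sketch of how such flags are usually adjusted on K3s, additional kube-apiserver arguments can be supplied through the `kube-apiserver-arg` entry in the K3s configuration file; the flag values shown below are illustrative examples only, not settings required by this control.
+
+```yaml
+# /etc/rancher/k3s/config.yaml -- illustrative sketch only.
+# Each entry is appended to the kube-apiserver command line that K3s builds at startup.
+kube-apiserver-arg:
+  - "anonymous-auth=false"
+  - "enable-admission-plugins=NodeRestriction,ServiceAccount"
+```
+
+After editing the file, restart the service (for example, `systemctl restart k3s`) so the new arguments take effect; they then appear on the `Running kube-apiserver` journal line that the audit commands in this guide grep for.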
+ +**Audit:** + +```bash +/bin/ps -ef | grep containerd | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' is present OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 519 1 0 22:09 ? 00:00:00 /usr/bin/containerd root 801 1 0 22:09 ? 00:00:00 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock root 3864 1 0 22:31 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id d00174abbc275f6bb85c7f0be1d3154b9c91982a10b9dba6b5cb280f4d4c531d -address /run/k3s/containerd/containerd.sock root 4105 1 0 22:31 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 7c2b546b4d2380bcb51278661f34cff94fad2ba06978e13f8f1b92dafcc89d43 -address /run/k3s/containerd/containerd.sock root 4206 1 0 22:31 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 68d8a55ff4663985be004608dbf78b0362f5522e18490c81d4c8dc9963de1556 -address /run/k3s/containerd/containerd.sock root 5374 1 0 22:31 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id ca0ae9e0b37dfd7b1ce05f72e1bc5a1be8f5cb08f2b4543081536de3bdbc925d -address /run/k3s/containerd/containerd.sock root 5443 1 0 22:31 ? 00:00:01 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 3ea3c1cdbbd5adb8efd5c67a46aadd0fca9918dc0ad1f7cafe38b83171e3dc1b -address /run/k3s/containerd/containerd.sock root 7130 1 0 22:32 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 4d838297d35a31003106ac5989c3547433985bb2964b47baad12cee6e375645e -address /run/k3s/containerd/containerd.sock root 7639 1 0 22:32 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 341cb9bcd8486aa2f1acb8e1ae51baebd630ac6ed266643266c34d677f61c7d0 -address /run/k3s/containerd/containerd.sock root 10308 1 0 23:17 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id c534fbee8e0d06fd9b29bf8fc70a138975c6b18db25f1faf2615677dfdb4199e -address /run/k3s/containerd/containerd.sock root 11370 1 0 23:18 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 4ff4b8776dac7a35b83616d341dbe4d5a689ac7fb9b8eee8db5978e3968380ea -address /run/k3s/containerd/containerd.sock root 13736 13723 2 23:21 ? 00:00:10 containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd root 16022 1 0 23:29 ? 00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 9027256349086e458119478e5e00384b1b76fbf5e6dbee23699f596a88d9f2bc -address /run/k3s/containerd/containerd.sock root 16159 1 0 23:29 ? 
00:00:00 /var/lib/rancher/k3s/data/630c40ff866a3db218a952ebd4fd2a5cfe1543a1a467e738cb46a2ad4012d6f1/bin/containerd-shim-runc-v2 -namespace k8s.io -id 929bf369fc5881654f4c1925624151ddb7cea51073267b8d213d966ba45406f3 -address /run/k3s/containerd/containerd.sock +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the --kubelet-https parameter. + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local 
--service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the +--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. +--kubelet-certificate-authority= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 
--service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local 
--service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'Node' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true 
--service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, +for example `--authorization-mode=Node,RBAC`. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'RBAC' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 
--service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 
--storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 
--tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'AlwaysPullImages' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'SecurityContextDeny' OR '--enable-admission-plugins' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 
--tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount' +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 
--storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.16 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 
--tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.17 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port' +``` + +**Expected Result**: + +```console +'--secure-port' is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.18 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.19 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example, +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path' +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.20 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxage parameter to 30 +or as an appropriate number of days, for example, +--audit-log-maxage=30 + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage' +``` + +**Expected Result**: + +```console +'--audit-log-maxage' is greater or equal to 30 +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.21 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. For example, +--audit-log-maxbackup=10 + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup' +``` + +**Expected Result**: + +```console +'--audit-log-maxbackup' is greater or equal to 10 +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.22 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB, --audit-log-maxsize=100 + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize' +``` + +**Expected Result**: + +```console +'--audit-log-maxsize' is greater or equal to 100 +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.23 Ensure that the --request-timeout argument is set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, --request-timeout=300s + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'request-timeout' +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.24 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-lookup' +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR '--service-account-lookup' is equal to 'true' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.25 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --service-account-key-file parameter +to the public key file for service accounts. For example, +--service-account-key-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file' +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.26 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate and key file parameters. +--etcd-certfile= +--etcd-keyfile= + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 1.2.29 +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs 
--client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.27 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the TLS certificate and private key file parameters. 
+--tls-cert-file= +--tls-private-key-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false 
--secure-port=10259" +``` + +### 1.2.28 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the client certificate authority file. +--client-ca-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.29 Ensure that the --etcd-cafile argument is set as 
appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate authority file parameter. +--etcd-cafile= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile' +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.30 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +Follow 
the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --encryption-provider-config parameter to the path of that file. +For example, --encryption-provider-config= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config' +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 1.2.31 Ensure that encryption providers are appropriately configured (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and 
configure a EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. + +**Audit:** + +```bash +grep aescbc /path/to/encryption-config.json +``` + +### 1.2.32 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, +TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, +TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, +TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites' +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' contains valid elements from 'TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt 
--kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) + + +**Result:** warn + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example, --terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold' +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the below parameter. 
+--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling' +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node to set the below parameter. 
+--use-service-account-credentials=true + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials' +``` + +**Expected Result**: + +```console +'--use-service-account-credentials' is not equal to 'false' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --service-account-private-key-file parameter +to the private key file for service accounts. 
+--service-account-private-key-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file' +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+--root-ca-file= + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
+--feature-gates=RotateKubeletServerCertificate=true + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address' +``` + +**Expected Result**: + +```console +'--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the control plane node and set the below parameter. 
+--profiling=false + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' +``` + +**Expected Result**: + +```console +'--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259" +``` + +## 2 Etcd Node Configuration +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. 
+--cert-file= +--key-file= + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.1 +``` + +**Expected Result**: + +```console +'cert-file' is present AND 'key-file' is present +``` + +**Returned Value**: + +```console +cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key +``` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master +node and set the below parameter. 
+--client-cert-auth="true" + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.2 +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +client-cert-auth: true +``` + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master +node and either remove the --auto-tls parameter or set it to false. 
+--auto-tls=false + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.3 +``` + +**Expected Result**: + +```console +'ETCD_AUTO_TLS' is not present OR 'ETCD_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory +``` + +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the +master node and set the below parameters. 
+--peer-cert-file= +--peer-key-file= + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.4 +``` + +**Expected Result**: + +```console +'cert-file' is present AND 'key-file' is present +``` + +**Returned Value**: + +```console +cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key +``` + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master +node and set the below parameter.
+--peer-client-cert-auth=true + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.5 +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +client-cert-auth: true +``` + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master +node and either remove the --peer-auto-tls parameter or set it to false. 
+--peer-auto-tls=false + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.6 +``` + +**Expected Result**: + +```console +'ETCD_PEER_AUTO_TLS' is not present OR 'ETCD_PEER_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +error: process ID list syntax error Usage: ps [options] Try 'ps --help ' or 'ps --help ' for additional help text. For more details see ps(1). cat: /proc//environ: No such file or directory +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the +master node and set the below parameter. 
+--trusted-ca-file= + +**Audit Script:** `check_for_k3s_etcd.sh` + +```bash +#!/bin/bash + +# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) +# before it checks the requirement +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + + +if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd cluster initializing' | grep -v grep | wc -l)" -gt 0 ]]; then + case $1 in + "1.1.11") + echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; + "1.2.29") + echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; + "2.1") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.2") + echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.3") + echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.4") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; + "2.5") + echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth');; + "2.6") + echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; + "2.7") + echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; + esac +else +# If another database is running, return whatever is required to pass the scan + case $1 in + "1.1.11") + echo "700";; + "1.2.29") + echo "--etcd-certfile AND --etcd-keyfile";; + "2.1") + echo "cert-file AND key-file";; + "2.2") + echo "--client-cert-auth=true";; + "2.3") + echo "false";; + "2.4") + echo "peer-cert-file AND peer-key-file";; + "2.5") + echo "--client-cert-auth=true";; + "2.6") + echo "--peer-auto-tls=false";; + "2.7") + echo "--trusted-ca-file";; + esac +fi + +``` + +**Audit Execution:** + +```bash +./check_for_k3s_etcd.sh 2.7 +``` + +**Expected Result**: + +```console +'trusted-ca-file' is present +``` + +**Returned Value**: + +```console +trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Manual) + + +**Result:** warn + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file' +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Review the audit policy provided for the cluster and ensure that it covers +at least the following areas, +- Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. +- Modification of Pod and Deployment objects. +- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
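+
+Checks 3.2.1 and 3.2.2 leave the contents of the audit policy up to you. The sketch below is a minimal, illustrative starting point that covers the areas listed above; it is an assumption for demonstration purposes, not necessarily the policy used to produce the scan results in this guide. The target path matches the `--audit-policy-file` value shown in the kube-apiserver output earlier on this page; verify it against your own layout before writing anything.
+
+```bash
+# Minimal example policy for checks 3.2.1 and 3.2.2 (illustrative only).
+# The path below is taken from the --audit-policy-file flag shown in the
+# audit output above; adjust it if your cluster uses a different location.
+sudo tee /var/lib/rancher/k3s/server/audit.yaml > /dev/null <<'EOF'
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+  # Log only metadata for Secrets, ConfigMaps, and TokenReviews so that
+  # sensitive payloads are never written to the audit log.
+  - level: Metadata
+    resources:
+      - group: ""
+        resources: ["secrets", "configmaps"]
+      - group: "authentication.k8s.io"
+        resources: ["tokenreviews"]
+  # Record workload changes and use of exec/port-forward/proxy subresources.
+  - level: Metadata
+    resources:
+      - group: ""
+        resources: ["pods", "pods/exec", "pods/portforward", "pods/proxy", "services/proxy"]
+      - group: "apps"
+        resources: ["deployments"]
+  # Baseline: capture everything else at the Metadata level.
+  - level: Metadata
+EOF
+```
+
+The embedded kube-apiserver reads the policy file at startup, so restart the k3s service on the server nodes after creating or changing it.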
+ +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node.
+For example, +chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +--client-ca-file chmod 644 + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt +``` + +**Expected Result**: + +```console +'644' is equal to '644' OR '640' is present OR '600' is present OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` + +**Returned Value**: + +```console +644 +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +chown root:root + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/kubelet/config.yaml + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/kubelet/config.yaml + +## 4.2 Kubelet +### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to +`false`. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +`--anonymous-auth=false` +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi' +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. 
If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi' +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 
--tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--client-ca-file= +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/sh -c 'if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi' +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:42 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:42Z" level=info msg="Running kube-apiserver --admission-control-config-file=/etc/rancher/k3s/config/rancher-psact.yaml --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log --audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction,ServiceAccount --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 
--tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' +``` + +**Expected Result**: + +```console +'--read-only-port' is equal to '0' OR '--read-only-port' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:44 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:44Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-31-124 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c42d922-ed1e-4e15-8414-d399d179d897 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout' +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults' +``` + +**Expected Result**: + +```console +'--protect-kernel-defaults' is equal to 'true' +``` + +**Returned Value**: + +```console +Feb 27 23:21:44 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:44Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-31-124 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c42d922-ed1e-4e15-8414-d399d179d897 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains' +``` + +**Expected Result**: + +```console +'--make-iptables-util-chains' is equal to 'true' OR '--make-iptables-util-chains' is not present +``` + +**Returned Value**: + +```console +Feb 27 23:21:44 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:44Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-31-124 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c42d922-ed1e-4e15-8414-d399d179d897 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC containerd +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `tlsCertFile` to the location +of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +--tls-cert-file= +--tls-private-key-file= +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +Feb 27 23:21:44 ip-172-31-31-124 k3s[13723]: time="2023-02-27T23:21:44Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-31-124 --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c42d922-ed1e-4e15-8414-d399d179d897 --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC containerd +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'{.rotateCertificates}' is present OR '{.rotateCertificates}' is not present +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `TLSCipherSuites` to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC containerd +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to Secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Manual) + + +**Result:** warn + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +### 5.1.7 Avoid use of system:masters group (Manual) + + +**Result:** warn + +**Remediation:** +Remove the system:masters group from all users in the cluster. + +### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove the impersonate, bind and escalate rights from subjects. 
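+
+The 5.1 controls above are manual reviews. As a starting point, the sketch below uses `kubectl` and `jq` (both assumed to be available, as in the audit commands elsewhere in this guide) to list ClusterRoleBindings that grant `cluster-admin` and ClusterRoles that allow the `bind`, `impersonate`, or `escalate` verbs. Treat it as an illustration of the review, not an authoritative audit.
+
+```bash
+# Sketch: ClusterRoleBindings bound to cluster-admin (relates to control 5.1.1)
+kubectl get clusterrolebindings -o json \
+  | jq -r '.items[] | select(.roleRef.name == "cluster-admin") | .metadata.name'
+
+# Sketch: ClusterRoles that allow bind, impersonate, or escalate (relates to control 5.1.8)
+kubectl get clusterroles -o json \
+  | jq -r '.items[] | select([.rules[]?.verbs[]?] | any(. == "bind" or . == "impersonate" or . == "escalate")) | .metadata.name'
+```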
+ +## 5.2 Pod Security Standards +### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that either Pod Security Admission or an external policy control system is in place +for every namespace which contains user workloads. + +### 5.2.2 Minimize the admission of privileged containers (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of privileged containers. + +### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostPID` containers. + +### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostIPC` containers. + +### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostNetwork` containers. + +### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + +### 5.2.7 Minimize the admission of root containers (Automated) + + +**Result:** warn + +**Remediation:** +Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` +or `MustRunAs` with the range of UIDs not including 0, is set. + +### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with the `NET_RAW` capability. + +### 5.2.9 Minimize the admission of containers with added capabilities (Automated) + + +**Result:** warn + +**Remediation:** +Ensure that `allowedCapabilities` is not present in policies for the cluster unless +it is set to an empty array. + +### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual) + + +**Result:** warn + +**Remediation:** +Review the use of capabilities in applications running on your cluster. Where a namespace +contains applications which do not require any Linux capabilities to operate, consider adding +a PSP which forbids the admission of containers which do not drop all capabilities. + +### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + +### 5.2.12 Minimize the admission of HostPath volumes (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with `hostPath` volumes.
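+
+Many of the 5.2 controls above can be addressed by enabling Pod Security Admission on every namespace that contains user workloads, as control 5.2.1 notes. A minimal sketch is shown below; the namespace name is a placeholder, and the profile level should be validated against your workloads before it is enforced.
+
+```bash
+# Sketch: enforce the "restricted" Pod Security Standard on a user-workload namespace
+# (the namespace name is a placeholder)
+kubectl label namespace my-app-namespace \
+  pod-security.kubernetes.io/enforce=restricted \
+  pod-security.kubernetes.io/enforce-version=latest
+```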
+ +### 5.2.13 Minimize the admission of containers which use HostPorts (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers which use `hostPort` sections. + +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual) + + +**Result:** warn + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +### 5.3.2 Ensure that all Namespaces have NetworkPolicies defined (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +## 5.4 Secrets Management +### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual) + + +**Result:** warn + +**Remediation:** +If possible, rewrite application code to read Secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + + +**Result:** warn + +**Remediation:** +Refer to the Secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual) + + +**Result:** warn + +**Remediation:** +Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. +An example is as below: +securityContext: +seccompProfile: +type: RuntimeDefault + +### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a +suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. 
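+
+As a companion to control 5.7.2 above, the sketch below shows the seccomp `securityContext` with its YAML structure intact, applied as a pod-level default. The pod name, namespace, and image are placeholders, not part of the benchmark.
+
+```bash
+# Sketch: pod-level seccomp profile as described in control 5.7.2
+# (pod name, namespace, and image are placeholders)
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: seccomp-example
+  namespace: my-app-namespace
+spec:
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+  - name: app
+    image: registry.example.com/app:latest
+EOF
+```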
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md new file mode 100644 index 000000000000..084568cc4025 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md @@ -0,0 +1,3085 @@ +--- +title: RKE Self-Assessment Guide - CIS Benchmark v1.7 - K8s v1.25 +--- + +This document is a companion to the [RKE Hardening Guide](../../../../pages-for-subheaders/rke1-hardening-guide.md), which provides prescriptive guidance on how to harden RKE clusters that are running in production and managed by Rancher. This benchmark guide helps you evaluate the security of a hardened cluster against each control in the CIS Kubernetes Benchmark. + + +This guide corresponds to the following versions of Rancher, CIS Benchmarks, and Kubernetes: + +| Rancher Version | CIS Benchmark Version | Kubernetes Version | +|-----------------|-----------------------|--------------------| +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 | + +This guide walks through the various controls and provides updated example commands to audit compliance in Rancher-created clusters. Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. These checks will return a result of `Not Applicable`. + +This document is for Rancher operators, security teams, auditors, and decision makers. + +For more information about each control, including detailed descriptions and remediations for failing tests, refer to the corresponding section of the CIS Kubernetes Benchmark v1.7. You can download the benchmark, after creating a free account, at [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). + +## Testing Methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. + +:::note + +This guide only covers `automated` (previously called `scored`) tests. + +::: + +### Controls + +## 1.1 Control Plane Node Configuration Files +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time.
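+
+Because RKE passes all component configuration as container arguments, one way to review the effective settings during an audit is to inspect the component containers directly. A sketch, assuming the default RKE container names and that `docker` and `jq` are available on the node:
+
+```bash
+# Sketch: print the arguments passed to the kube-apiserver container started by RKE
+docker inspect kube-apiserver --format '{{ json .Args }}' | jq -r '.[]'
+```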
+ +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time. + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 + +**Audit:** + +```bash +ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a +``` + +**Expected Result**: + +```console +permissions has permissions 600, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 permissions=600 +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, +chown root:root + +**Audit:** + +```bash +ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root root:root +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c %a /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** Not Applicable + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +### 1.1.13 Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit Script:** `check_files_owner_in_dir.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the owner is set to root:root for +# the given directory and all the files in it +# +# inputs: +# $1 = /full/path/to/directory +# +# outputs: +# true/false + +INPUT_DIR=$1 + +if [[ "${INPUT_DIR}" == "" ]]; then + echo "false" + exit +fi + +if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then + echo "false" + exit +fi + +statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) +while read -r statInfoLine; do + f=$(echo ${statInfoLine} | cut -d' ' -f1) + p=$(echo ${statInfoLine} | cut -d' ' -f2) + + if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then + if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "root:root" ]]; then + echo "false" + exit + fi + fi +done <<< "${statInfoLines}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_owner_in_dir.sh /node/etc/kubernetes/ssl +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh '/node/etc/kubernetes/ssl/!(*key).pem' +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, +chmod -R 600 /etc/kubernetes/ssl/*key.pem + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh '/node/etc/kubernetes/ssl/*key.pem' +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the --token-auth-file= parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the `DenyServiceExternalIPs` +from enabled admission plugins. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'DenyServiceExternalIPs' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the control plane node and set the +--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. +--kubelet-certificate-authority= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'Node' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, +for example `--authorization-mode=Node,RBAC`. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'RBAC' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... 
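+ +The remediation above edits the API server flags directly. On an RKE-provisioned cluster such as the one audited in this guide, the kube-apiserver arguments are managed through `cluster.yml`, so the plugin is normally enabled there instead. The snippet below is a minimal, illustrative sketch that assumes your RKE version supports the `always_pull_images` option for the `kube-api` service; confirm against the RKE documentation before applying it. + +```yaml +# cluster.yml (RKE) - illustrative fragment, not a complete cluster configuration. +# always_pull_images adds AlwaysPullImages to the API server's +# --enable-admission-plugins list when the control plane is reconciled. +services: +  kube-api: +    always_pull_images: true +``` + +After updating `cluster.yml`, run `rke up` to reconcile the control plane, then re-run the audit command below to confirm that the plugin is listed in `--enable-admission-plugins`.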
+ +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= 
--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.16 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 
--service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.17 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--secure-port' is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem 
+``` + +### 1.2.18 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.19 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to 
be written, for example, +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.20 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxage parameter to 30 +or as an appropriate number of days, for example, +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxage' is greater or equal to 30 +``` + 
+**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.21 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. For example, +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxbackup' is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.22 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB, --audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxsize' is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.24 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR '--service-account-lookup' is equal to 'true' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.25 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --service-account-key-file parameter +to the public key file for service accounts. For example, +--service-account-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.26 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate and key file parameters. +--etcd-certfile= +--etcd-keyfile= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.27 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the TLS certificate and private key file parameters. +--tls-cert-file= +--tls-private-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.28 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the client certificate authority file. +--client-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.29 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate authority file parameter. +--etcd-cafile= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.30 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the --encryption-provider-config parameter to the path of that file. +For example, --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 1.2.31 Ensure that encryption providers are appropriately configured (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure an EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. + +**Audit Script:** `check_encryption_provider_config.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to check the encryption provider config is set to aescbc +# +# outputs: +# true/false + +# TODO: Figure out the file location from the kube-apiserver commandline args +ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" + +if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then + echo "false" + exit +fi + +for provider in "$@" +do + if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then + echo "true" + exit + fi +done + +echo "false" +exit + +``` + +**Audit Execution:** + +```bash +./check_encryption_provider_config.sh aescbc +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +- aescbc: true +``` + +### 1.2.32 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, +TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, +TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, +TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' contains valid elements from 'TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384' +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example, --terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 
00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--use-service-account-credentials' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 
00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --service-account-private-key-file parameter +to the private key file for service accounts. +--service-account-private-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+--root-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +Cluster provisioned by RKE handles certificate rotation directly through RKE. + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' is present OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5506 5484 2 22:01 ? 
00:00:05 kube-controller-manager --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --pod-eviction-timeout=5m0s --terminated-pod-gc-threshold=1000 --v=2 --allocate-node-cidrs=true --enable-hostpath-provisioner=false --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --allow-untagged-cloud=true --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cluster-cidr=10.42.0.0/16 --node-monitor-grace-period=40s --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --profiling=false --configure-cloud-routes=false --leader-elect=true --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5671 5649 0 22:01 ? 00:00:01 kube-scheduler --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' is present OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5671 5649 0 22:01 ? 
00:00:01 kube-scheduler --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +## 2 Etcd Node Configuration +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. +--cert-file= +--key-file= + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--cert-file' is present AND '--key-file' is present +``` + +**Returned Value**: + +```console +etcd 5188 5167 3 22:01 ? 00:00:08 /usr/local/bin/etcd --client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://172.31.31.51:2380 --listen-peer-urls=https://172.31.31.51:2380 --initial-cluster=etcd-ip-172-31-31-51=https://172.31.31.51:2380 --initial-cluster-state=new --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --peer-client-cert-auth=true --listen-client-urls=https://172.31.31.51:2379 --cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --advertise-client-urls=https://172.31.31.51:2379 --initial-cluster-token=etcd-cluster-1 --name=etcd-ip-172-31-31-51 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --election-timeout=5000 --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --heartbeat-interval=500 root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem root 19036 18926 5 22:05 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=0 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.23-hardened --json --log_dir /tmp/sonobuoy/logs --outputfile /tmp/sonobuoy/etcd.json +``` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 5188 5167 3 22:01 ? 
00:00:08 /usr/local/bin/etcd --client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://172.31.31.51:2380 --listen-peer-urls=https://172.31.31.51:2380 --initial-cluster=etcd-ip-172-31-31-51=https://172.31.31.51:2380 --initial-cluster-state=new --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --peer-client-cert-auth=true --listen-client-urls=https://172.31.31.51:2379 --cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --advertise-client-urls=https://172.31.31.51:2379 --initial-cluster-token=etcd-cluster-1 --name=etcd-ip-172-31-31-51 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --election-timeout=5000 --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --heartbeat-interval=500 root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem root 19036 18926 4 22:05 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=0 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.23-hardened --json --log_dir /tmp/sonobuoy/logs --outputfile /tmp/sonobuoy/etcd.json +``` + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. +--auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'ETCD_AUTO_TLS' is not present OR 'ETCD_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=ip-172-31-31-51 ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/ssl/kube-ca.pem ETCDCTL_CERT=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem ETCDCTL_KEY=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem ETCDCTL_ENDPOINTS=https://172.31.31.51:2379 ETCD_UNSUPPORTED_ARCH=x86_64 HOME=/ +``` + +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameters. +--peer-cert-file= +--peer-key-file= + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-cert-file' is present AND '--peer-key-file' is present +``` + +**Returned Value**: + +```console +etcd 5188 5167 3 22:01 ? 00:00:08 /usr/local/bin/etcd --client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://172.31.31.51:2380 --listen-peer-urls=https://172.31.31.51:2380 --initial-cluster=etcd-ip-172-31-31-51=https://172.31.31.51:2380 --initial-cluster-state=new --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --peer-client-cert-auth=true --listen-client-urls=https://172.31.31.51:2379 --cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --advertise-client-urls=https://172.31.31.51:2379 --initial-cluster-token=etcd-cluster-1 --name=etcd-ip-172-31-31-51 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --election-timeout=5000 --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --heartbeat-interval=500 root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem root 19036 18926 2 22:05 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=0 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.23-hardened --json --log_dir /tmp/sonobuoy/logs --outputfile /tmp/sonobuoy/etcd.json +``` + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-client-cert-auth' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 5188 5167 3 22:01 ? 
00:00:08 /usr/local/bin/etcd --client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://172.31.31.51:2380 --listen-peer-urls=https://172.31.31.51:2380 --initial-cluster=etcd-ip-172-31-31-51=https://172.31.31.51:2380 --initial-cluster-state=new --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --peer-client-cert-auth=true --listen-client-urls=https://172.31.31.51:2379 --cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --advertise-client-urls=https://172.31.31.51:2379 --initial-cluster-token=etcd-cluster-1 --name=etcd-ip-172-31-31-51 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --election-timeout=5000 --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --heartbeat-interval=500 root 5354 5332 14 22:01 ? 00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem root 19036 18926 3 22:05 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=0 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.23-hardened --json --log_dir /tmp/sonobuoy/logs --outputfile /tmp/sonobuoy/etcd.json +``` + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. +--peer-auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'ETCD_PEER_AUTO_TLS' is not present OR 'ETCD_PEER_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=ip-172-31-31-51 ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/ssl/kube-ca.pem ETCDCTL_CERT=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem ETCDCTL_KEY=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem ETCDCTL_ENDPOINTS=https://172.31.31.51:2379 ETCD_UNSUPPORTED_ARCH=x86_64 HOME=/ +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameter. +--trusted-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is present +``` + +**Returned Value**: + +```console +etcd 5188 5167 3 22:01 ? 00:00:08 /usr/local/bin/etcd --client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://172.31.31.51:2380 --listen-peer-urls=https://172.31.31.51:2380 --initial-cluster=etcd-ip-172-31-31-51=https://172.31.31.51:2380 --initial-cluster-state=new --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --peer-client-cert-auth=true --listen-client-urls=https://172.31.31.51:2379 --cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51.pem --advertise-client-urls=https://172.31.31.51:2379 --initial-cluster-token=etcd-cluster-1 --name=etcd-ip-172-31-31-51 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-31-51-key.pem --election-timeout=5000 --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 --heartbeat-interval=500 root 5354 5332 14 22:01 ? 
00:00:33 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem root 19036 18926 2 22:05 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=0 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.23-hardened --json --log_dir /tmp/sonobuoy/logs --outputfile /tmp/sonobuoy/etcd.json +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. 
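+As a rough illustration only (not the audit policy that RKE generates), a minimal policy file satisfying this check could be created at the path the API server is started with (`--audit-policy-file=/etc/kubernetes/audit-policy.yaml` in the output below). The single Metadata-level rule is a placeholder; extend it to match your own logging requirements:
+
+```bash
+# Hypothetical sketch: write a one-rule audit policy that records request
+# metadata for every resource, then reference it via --audit-policy-file.
+cat <<'EOF' > /etc/kubernetes/audit-policy.yaml
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+  - level: Metadata
+EOF
+```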
+ +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-policy-file' is present +``` + +**Returned Value**: + +```console +root 5354 5332 14 22:01 ? 00:00:34 kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,EventRateLimit --runtime-config=authorization.k8s.io/v1beta1=true --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxsize=100 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-group-headers=X-Remote-Group --storage-backend=etcd3 --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --authentication-token-webhook-cache-ttl=5s --etcd-prefix=/registry --service-node-port-range=30000-32767 --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml --profiling=false --audit-log-format=json --admission-control-config-file=/etc/kubernetes/admission.yaml --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --allow-privileged=true --requestheader-username-headers=X-Remote-User --anonymous-auth=false --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --api-audiences=unknown --etcd-servers=https://172.31.31.51:2379 --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --advertise-address=172.31.31.51 --audit-log-maxage=30 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --requestheader-extra-headers-prefix=X-Remote-Extra- --bind-address=0.0.0.0 --service-account-lookup=true --authorization-mode=Node,RBAC --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-allowed-names=kube-apiserver-proxy-client --service-account-issuer=rke --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --secure-port=6443 --audit-log-maxbackup=10 --audit-policy-file=/etc/kubernetes/audit-policy.yaml --cloud-provider= --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Review the audit policy provided for the cluster and ensure that it covers +at least the following areas, +- Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. +- Modification of Pod and Deployment objects. +- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. 
+ For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c permissions=%a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 600, expected 644 or more restrictive OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present +``` + +**Returned Value**: + +```console +permissions=600 +``` + +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 600, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=600 +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. 
+For example, +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +--client-ca-file chmod 644 + +**Audit:** + +```bash +stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +chown root:root + +**Audit:** + +```bash +stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +## 4.2 Kubelet +### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to +`false`. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +`--anonymous-auth=false` +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--client-ca-file= +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--read-only-port' is equal to '0' OR '--read-only-port' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--streaming-connection-idle-timeout' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--protect-kernel-defaults' is equal to 'true' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--make-iptables-util-chains' is equal to 'true' OR '--make-iptables-util-chains' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--event-qps' is equal to '0' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `tlsCertFile` to the location +of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +--tls-cert-file= +--tls-private-key-file= +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'{.rotateCertificates}' is present OR '{.rotateCertificates}' is not present +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE handles certificate rotation directly through RKE. 
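+
+If you still want to spot-check this on a node, the feature gate is visible in the kubelet arguments. This is an informal check, not part of the benchmark audit:
+
+```bash
+# Informal spot check (not part of the benchmark audit): confirm the kubelet
+# was started with the RotateKubeletServerCertificate feature gate enabled.
+/bin/ps -fC kubelet | grep -o 'RotateKubeletServerCertificate=[a-z]*'
+```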
+ +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `TLSCipherSuites` to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then /bin/cat /var/lib/kubelet/config.yaml; fi' +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' contains valid elements from 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 6239 5834 2 22:02 ? 
00:00:04 kubelet --authorization-mode=Webhook --v=2 --root-dir=/var/lib/kubelet --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51-key.pem --cgroups-per-qos=True --streaming-connection-idle-timeout=30m --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-31-51.pem --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --address=0.0.0.0 --cluster-domain=cluster.local --fail-swap-on=false --make-iptables-util-chains=true --volume-plugin-dir=/var/lib/kubelet/volumeplugins --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --pod-infra-container-image=rancher/mirrored-pause:3.6 --node-ip=172.31.31.51 --resolv-conf=/etc/resolv.conf --event-qps=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --protect-kernel-defaults=true --cluster-dns=10.43.0.10 --container-runtime=remote --authentication-token-webhook=true --anonymous-auth=false --feature-gates=RotateKubeletServerCertificate=true --cloud-provider= --read-only-port=0 --hostname-override=ip-172-31-31-51 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to Secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. 
+Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +Error from server (Forbidden): serviceaccounts is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "serviceaccounts" in API group "" at the cluster scope Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-fleet-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-impersonation-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io 
is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cis-operator-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "default" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "ingress-nginx" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-node-lease" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-public" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "local" true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to 
mount service +account tokens to disable it. + +### 5.1.7 Avoid use of system:masters group (Manual) + + +**Result:** warn + +**Remediation:** +Remove the system:masters group from all users in the cluster. + +### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove the impersonate, bind and escalate rights from subjects. + +## 5.2 Pod Security Standards +### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that either Pod Security Admission or an external policy control system is in place +for every namespace which contains user workloads. + +### 5.2.2 Minimize the admission of privileged containers (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of privileged containers. + +### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostPID` containers. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostIPC` containers. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostNetwork` containers. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. 
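+One option on clusters where PodSecurityPolicy is no longer served (the audit below fails because the `psp` resource type no longer exists) is Pod Security Admission: the `restricted` profile rejects containers that allow privilege escalation. The namespace name in this sketch is only an example:
+
+```bash
+# Illustrative only: enforce the "restricted" Pod Security Standard on a user
+# namespace so that pods allowing privilege escalation are rejected at admission.
+kubectl label namespace my-app-namespace \
+  pod-security.kubernetes.io/enforce=restricted --overwrite
+```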
+ +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.7 Minimize the admission of root containers (Automated) + + +**Result:** warn + +**Remediation:** +Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` +or `MustRunAs` with the range of UIDs not including 0, is set. + +### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with the `NET_RAW` capability. + +### 5.2.9 Minimize the admission of containers with added capabilities (Automated) + + +**Result:** warn + +**Remediation:** +Ensure that `allowedCapabilities` is not present in policies for the cluster unless +it is set to an empty array. + +### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual) + + +**Result:** warn + +**Remediation:** +Review the use of capabilites in applications running on your cluster. Where a namespace +contains applicaions which do not require any Linux capabities to operate consider adding +a PSP which forbids the admission of containers which do not drop all capabilities. + +### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + +### 5.2.12 Minimize the admission of HostPath volumes (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers with `hostPath` volumes. + +### 5.2.13 Minimize the admission of containers which use HostPorts (Manual) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of containers which use `hostPort` sections. + +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports NetworkPolicies (Manual) + + +**Result:** warn + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. 
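+A common starting point, assuming an illustrative namespace name of `my-app-namespace`, is a default deny-all policy that you then relax with more specific allow rules:
+
+```bash
+# Illustrative only: apply a default deny-all NetworkPolicy to one namespace.
+cat <<'EOF' | kubectl apply -n my-app-namespace -f -
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-all
+spec:
+  podSelector: {}
+  policyTypes:
+    - Ingress
+    - Egress
+EOF
+```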
+ +**Audit Script:** `check_for_network_policies.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [[ ${policy_count} -eq 0 ]]; then + echo "false" + exit + fi +done + +echo "true" + +``` + +**Audit Execution:** + +```bash +./check_for_network_policies.sh +``` + +**Expected Result**: + +```console +'true' is present +``` + +**Returned Value**: + +```console +true +``` + +## 5.4 Secrets Management +### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual) + + +**Result:** warn + +**Remediation:** +If possible, rewrite application code to read Secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + + +**Result:** warn + +**Remediation:** +Refer to the Secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual) + + +**Result:** warn + +**Remediation:** +Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. +An example is as below: +securityContext: +seccompProfile: +type: RuntimeDefault + +### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a +suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Automated) + + +**Result:** pass + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. 
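+For example, create a dedicated namespace and deploy workloads into it instead of into `default`. The namespace and manifest names below are placeholders:
+
+```bash
+# Illustrative only: create a dedicated namespace and deploy a workload there
+# rather than into the default namespace.
+kubectl create namespace my-app-namespace
+kubectl -n my-app-namespace apply -f my-app.yaml
+```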
+ +**Audit Script:** `check_for_default_ns.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) +if [[ ${count} -gt 0 ]]; then + echo "false" + exit +fi + +echo "true" + + +``` + +**Audit Execution:** + +```bash +./check_for_default_ns.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +Error from server (Forbidden): replicationcontrollers is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "replicationcontrollers" in API group "" in the namespace "default" Error from server (Forbidden): services is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "services" in API group "" in the namespace "default" Error from server (Forbidden): daemonsets.apps is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "daemonsets" in API group "apps" in the namespace "default" Error from server (Forbidden): deployments.apps is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "deployments" in API group "apps" in the namespace "default" Error from server (Forbidden): replicasets.apps is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "replicasets" in API group "apps" in the namespace "default" Error from server (Forbidden): statefulsets.apps is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "statefulsets" in API group "apps" in the namespace "default" Error from server (Forbidden): horizontalpodautoscalers.autoscaling is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "horizontalpodautoscalers" in API group "autoscaling" in the namespace "default" Error from server (Forbidden): cronjobs.batch is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "cronjobs" in API group "batch" in the namespace "default" Error from server (Forbidden): jobs.batch is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "jobs" in API group "batch" in the namespace "default" true +``` + diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md new file mode 100644 index 000000000000..252accbb79d6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md @@ -0,0 +1,3196 @@ +--- +title: RKE2 Self-Assessment Guide - CIS Benchmark v1.7 - K8s v1.25 +--- + +This document is a companion to the [RKE2 Hardening Guide](../../../../pages-for-subheaders/rke2-hardening-guide.md), which provides prescriptive guidance on how to harden RKE2 clusters that are running in production and managed by Rancher. 
This benchmark guide helps you evaluate the security of a hardened cluster against each control in the CIS Kubernetes Benchmark.
+
+This guide corresponds to the following versions of Rancher, CIS Benchmarks, and Kubernetes:
+
+| Rancher Version | CIS Benchmark Version | Kubernetes Version |
+|-----------------|-----------------------|--------------------|
+| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 |
+
+This guide walks through the various controls and provides updated example commands to audit compliance in Rancher-created clusters. Because Rancher and RKE2 install Kubernetes services as containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. These checks will return a result of `Not Applicable`.
+
+This document is for Rancher operators, security teams, auditors, and decision makers.
+
+For more information about each control, including detailed descriptions and remediations for failing tests, refer to the corresponding section of the CIS Kubernetes Benchmark v1.7. You can download the benchmark, after creating a free account, at [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/).
+
+## Testing Methodology
+
+RKE2 launches control plane components as static pods, managed by the kubelet, and uses containerd as the container runtime. Configuration is defined by arguments passed to the container at the time of initialization or via a configuration file.
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE2 nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results.
+
+:::note
+
+This guide only covers `automated` (previously called `scored`) tests.
+
+:::
+
+### Controls
+
+## 1.1 Master Node Configuration Files
+### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the
+control plane node.
+For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+
+**Audit:**
+
+```bash
+stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+```
+
+**Expected Result**:
+
+```console
+permissions has permissions 644, expected 644 or more restrictive
+```
+
+**Returned Value**:
+
+```console
+permissions=644
+```
+
+### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on the control plane node.
+For example, chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'permissions' is equal to '644' +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
+For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 644 + +**Audit:** + +```bash +ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=600 permissions=644 +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root + +**Audit:** + +```bash +ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root root:root +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +permissions has permissions 700, expected 700 or more restrictive +``` + +**Returned Value**: + +```console +permissions=700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the command 'ps -ef | grep etcd'. +Run the below command (based on the etcd data directory found above). 
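On RKE2 the etcd data directory defaults to `/var/lib/rancher/rke2/server/db/etcd`, the path used in these audits, but the benchmark asks you to confirm it from the running process. A hedged sketch that discovers the directory and reports its ownership, falling back to the RKE2 default if the flag cannot be parsed:

```bash
#!/bin/sh
# Sketch: derive the etcd data directory from the running process, then report
# its ownership. Falls back to the RKE2 default path if parsing finds nothing.
DATA_DIR=$(ps -ef | grep -- '--data-dir' | grep -v grep \
  | sed 's/.*--data-dir[= ]\([^ ]*\).*/\1/' | head -n 1)
DATA_DIR=${DATA_DIR:-/var/lib/rancher/rke2/server/db/etcd}
stat -c '%n owner=%U:%G' "$DATA_DIR"
```

The ownership fix itself is the command shown next.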
+For example, chown etcd:etcd /var/lib/etcd + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd +``` + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chmod 600 /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, chown root:root /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 scheduler + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown root:root scheduler + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod 644 controllermanager + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. 
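Controls 1.1.13 through 1.1.18 all audit the generated kubeconfig files under `/var/lib/rancher/rke2/server/cred`. As a supplementary sketch (default install path assumed), you can list any of them that are not owned by `root:root` in a single command:

```bash
# Sketch: list any RKE2-generated kubeconfig not owned by root:root,
# covering the files checked by controls 1.1.13 through 1.1.18.
find /var/lib/rancher/rke2/server/cred -maxdepth 1 -name '*.kubeconfig' \
  \( ! -user root -o ! -group root \) -exec stat -c '%n owner=%U:%G' {} \;
```

The per-file remediation follows.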
+For example, +chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chown -R root:root /var/lib/rancher/rke2/server/tls/ + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/tls +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 permissions=644 +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the control plane node. +For example, +chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + +**Audit:** + +```bash +stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key +``` + +**Expected Result**: + +```console +'permissions' is equal to '600' +``` + +**Returned Value**: + +```console +permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 permissions=600 +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.2 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and remove the --token-auth-file= parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.3 Ensure that the --DenyServiceExternalIPs is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and remove the `DenyServiceExternalIPs` +from enabled admission plugins. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'DenyServiceExternalIPs' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the control plane node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the control plane node and set the +--kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. +--kubelet-certificate-authority= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'Node' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, +for example `--authorization-mode=Node,RBAC`. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--authorization-mode' has 'RBAC' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' does not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'AlwaysPullImages' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
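+
+A minimal sketch of acting on this warning, assuming the `kube-apiserver-arg` passthrough in `/etc/rancher/rke2/config.yaml` (the plugin list below is an example, not a Rancher default):
+
+```yaml
+# /etc/rancher/rke2/config.yaml -- keep NodeRestriction and any other plugins already enabled
+kube-apiserver-arg:
+  - "enable-admission-plugins=NodeRestriction,AlwaysPullImages"
+```
+
+Restart the `rke2-server` service for the change to take effect. The same pattern applies to the other `--enable-admission-plugins` checks in this section, such as SecurityContextDeny in 1.2.13. Whether to enable AlwaysPullImages is a site decision: it forces image pull credentials to be re-checked whenever a pod starts, but it also adds registry load and can break clusters that rely on pre-loaded images.
+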
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'SecurityContextDeny' OR '--enable-admission-plugins' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:05 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true root 15378 3910 99 23:32 ? 00:00:00 kubectl get --server=https://localhost:6443/ --client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --raw=/readyz +``` + +### 1.2.16 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--enable-admission-plugins' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.17 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--secure-port' is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.18 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.19 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example, +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
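+
+Checks 1.2.19 through 1.2.22 (this check and the three that follow) already pass here: the returned values in this report show the API server running with `--audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log`, `--audit-log-maxage=30`, `--audit-log-maxbackup=10`, and `--audit-log-maxsize=100`. If your site policy calls for different retention settings, one way to adjust them is the `kube-apiserver-arg` passthrough in `/etc/rancher/rke2/config.yaml`; the values below are examples only:
+
+```yaml
+# /etc/rancher/rke2/config.yaml -- example audit log settings, adjust to site policy
+kube-apiserver-arg:
+  - "audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log"
+  - "audit-log-maxage=30"
+  - "audit-log-maxbackup=10"
+  - "audit-log-maxsize=100"
+```
+
+The audit policy itself is the file referenced by `--audit-policy-file=/etc/rancher/rke2/audit-policy.yaml` in the same process listing. Restart `rke2-server` after editing the configuration.
+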
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.20 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxage parameter to 30 +or as an appropriate number of days, for example, +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxage' is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.21 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. For example, +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxbackup' is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.22 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB, --audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-maxsize' is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.24 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR '--service-account-lookup' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.25 Ensure that the --request-timeout argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --service-account-key-file parameter +to the public key file for service accounts. For example, +--service-account-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.26 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate and key file parameters. +--etcd-certfile= +--etcd-keyfile= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.27 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the TLS certificate and private key file parameters. +--tls-cert-file= +--tls-private-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.28 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the client certificate authority file. +--client-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.29 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the etcd certificate authority file parameter. +--etcd-cafile= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.30 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the control plane node and set the --encryption-provider-config parameter to the path of that file. +For example, --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 3980 3910 19 23:26 ? 
00:01:06 kube-apiserver --admission-control-config-file=/etc/rancher/rke2/rke2-pss.yaml --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --admission-control-config-file=/etc/rancher/rke2/config/rancher-psact.yaml --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/rke2/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.2.32 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the control plane node and set the below parameter. +--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, +TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, +TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, +TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + +### 1.2.33 Ensure that encryption providers are appropriately configured (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. 
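+For reference, a minimal `EncryptionConfiguration` using the `aescbc` provider might look like the sketch below. The key name and secret are placeholders only; on RKE2 the encryption configuration is generated and managed automatically at `/var/lib/rancher/rke2/server/cred/encryption-config.json`, as shown in the returned value for this check.
+
+```yaml
+# Illustrative sketch only; key name and secret are placeholders, not values used by RKE2.
+apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+resources:
+  - resources:
+      - secrets
+    providers:
+      - aescbc:
+          keys:
+            - name: example-key
+              secret: <base64-encoded 32-byte key>
+      - identity: {}
+```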
+ +**Audit:** + +```bash +/bin/sh -c 'if grep aescbc /var/lib/rancher/rke2/server/cred/encryption-config.json; then echo 0; fi' +``` + +**Expected Result**: + +```console +'0' is present +``` + +**Returned Value**: + +```console +{"kind":"EncryptionConfiguration","apiVersion":"apiserver.config.k8s.io/v1","resources":[{"resources":["secrets"],"providers":[{"aescbc":{"keys":[{"name":"aescbckey","secret":"TSpBkJhIU0sRx+84IZuBZ1qO+eaRdW31C7QCnF3+n8s="}]}},{"identity":{}}]}]} 0 +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example, --terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--use-service-account-credentials' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and set the --service-account-private-key-file parameter +to the private key file for service accounts. +--service-account-private-key-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. +--root-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 4128 4029 2 23:27 ? 
00:00:06 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml file +on the control plane node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'--profiling' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4126 4014 0 23:27 ? 00:00:02 kube-scheduler --permit-port-sharing=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259 +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml +on the control plane node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 4126 4014 0 23:27 ? 
00:00:02 kube-scheduler --permit-port-sharing=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259 +``` + +## 2 Etcd Node Configuration +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. +--cert-file= +--key-file= + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. +--auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'ETCD_AUTO_TLS' is not present OR 'ETCD_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=ip-172-31-25-112 ETCD_UNSUPPORTED_ARCH= POD_HASH=ab0b8a2ee7711940d3d951edece075f3 FILE_HASH=068666c5f959fc1023cb1761daaaed212727c04120747d825c78fd7683122e6d NO_PROXY=.svc,.cluster.local,10.42.0.0/16,10.43.0.0/16 HOME=/ +``` + +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the +master node and set the below parameters. +--peer-cert-file= +--peer-key-file= + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false.
+--peer-auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'ETCD_PEER_AUTO_TLS' is not present OR 'ETCD_PEER_AUTO_TLS' is present +``` + +**Returned Value**: + +```console +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=ip-172-31-25-112 ETCD_UNSUPPORTED_ARCH= POD_HASH=ab0b8a2ee7711940d3d951edece075f3 FILE_HASH=068666c5f959fc1023cb1761daaaed212727c04120747d825c78fd7683122e6d NO_PROXY=.svc,.cluster.local,10.42.0.0/16,10.43.0.0/16 HOME=/ +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the +master node and set the below parameter. +--trusted-ca-file= + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Audit Config:** + +```bash +cat /var/lib/rancher/rke2/server/db/etcd/config +``` + +**Expected Result**: + +```console +'ETCD_TRUSTED_CA_FILE' is present OR '{.peer-transport-security.trusted-ca-file}' is equal to '/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt' +``` + +**Returned Value**: + +```console +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=ip-172-31-25-112 ETCD_UNSUPPORTED_ARCH= POD_HASH=ab0b8a2ee7711940d3d951edece075f3 FILE_HASH=068666c5f959fc1023cb1761daaaed212727c04120747d825c78fd7683122e6d NO_PROXY=.svc,.cluster.local,10.42.0.0/16,10.43.0.0/16 HOME=/ +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file +``` + +**Expected Result**: + +```console +'audit-policy-file' is equal to 'audit-policy-file' +``` + +**Returned Value**: + +```console +audit-policy-file +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Review the audit policy provided for the cluster and ensure that it covers +at least the following areas, +- Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. +- Modification of Pod and Deployment objects. +- Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. 
+For example, chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 644, expected 644 or more restrictive OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, chown root:root /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. 
+For example, +chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +--client-ca-file chmod 644 + +**Audit Script:** `check_cafile_permissions.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +CAFILE=/node$CAFILE +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_permissions.sh +``` + +**Expected Result**: + +```console +permissions has permissions 600, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=600 +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +chown root:root + +**Audit Script:** `check_cafile_ownership.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +CAFILE=/node$CAFILE +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_ownership.sh +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /etc/rancher/rke2/rke2.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/rancher/rke2/rke2.yaml; then stat -c permissions=%a /etc/rancher/rke2/rke2.yaml; fi' +``` + +**Expected Result**: + +```console +permissions has permissions 600, expected 644 or more restrictive +``` + +**Returned Value**: + +```console +permissions=600 +``` + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /etc/rancher/rke2/rke2.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/rancher/rke2/rke2.yaml; then stat -c %U:%G /etc/rancher/rke2/rke2.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +## 4.2 Kubelet +### 4.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to +`false`. 
+If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +`--anonymous-auth=false` +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--anonymous-auth' is equal to 'false' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--authorization-mode' does not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 
00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--client-ca-file= +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 
00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--read-only-port' is equal to '0' OR '--read-only-port' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 
00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{.streamingConnectionIdleTimeout}' is present OR '{.streamingConnectionIdleTimeout}' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. 
+If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--protect-kernel-defaults' is equal to 'true' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{.makeIPTablesUtilChains}' is present OR '{.makeIPTablesUtilChains}' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and 
remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{.eventRecordQPS}' is present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set `tlsCertFile` to the location +of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +--tls-cert-file= +--tls-private-key-file= +Based on your system, restart the kubelet service. For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 3727 3667 3 23:26 ? 
00:00:11 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=systemd --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=ip-172-31-25-112 --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --log-file-max-size=50 --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=5c1cd514-db7b-4692-a1c4-cacb2656161f --pod-infra-container-image=index.docker.io/rancher/pause:3.6 --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{.rotateCertificates}' is present OR '{.rotateCertificates}' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) + + +**Result:** pass + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set 
the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{.featureGates.RotateKubeletServerCertificate}' is present OR '{.featureGates.RotateKubeletServerCertificate}' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +### 4.2.13 Ensure that the Kubelet 
only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** warn + +**Remediation:** +If using a Kubelet config file, edit the file to set `TLSCipherSuites` to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /etc/rancher/rke2/rke2.yaml +``` + +**Expected Result**: + +```console +'{range .tlsCipherSuites[:]}{}{','}{end}' is present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlVENDQVIrZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWtNU0l3SUFZRFZRUUREQmx5YTJVeUxYTmwKY25abGNpMWpZVUF4TmpjM05EVXpPVE00TUI0WERUSXpNREl5TmpJek1qVXpPRm9YRFRNek1ESXlNekl6TWpVegpPRm93SkRFaU1DQUdBMVVFQXd3WmNtdGxNaTF6WlhKMlpYSXRZMkZBTVRZM056UTFNemt6T0RCWk1CTUdCeXFHClNNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOUHllSlcwNE9lMUN6clo5cHplN09WWlliMlh4QmNvWUJ5Z3JRcUwKYUdpR29tN2xLNGs2ZW1uaUZQekpiOU9EQ0hDRmYyVUZaVXZDQTQxcUVmNVB3NlNqUWpCQU1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUnhUdGJTbTVMTnFrditkYXQrCmczalBWWXJjcERBS0JnZ3Foa2pPUFFRREFnTklBREJGQWlBN08wV1NNVHo4djgwM3BZK3hFeDR2SUZaVEMraVoKUHVDc082eFRuVVVtb2dJaEFPY1NqQVdkUVJ0UmJvaUhJbTd2RHZuM1czT1JmYlUwaXU2UUhNcXdsTGs4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K server: https://127.0.0.1:6443 name: default contexts: - context: cluster: default user: default name: default current-context: default kind: Config preferences: {} users: - name: default user: client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRpZ0F3SUJBZ0lJRjc3RlZRT1l5RzR3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF3d1oKY210bE1pMWpiR2xsYm5RdFkyRkFNVFkzTnpRMU16a3pPREFlRncweU16QXlNall5TXpJMU16aGFGdzB5TkRBeQpNall5TXpJMU16aGFNREF4RnpBVkJnTlZCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUlV3RXdZRFZRUURFd3h6CmVYTjBaVzA2WVdSdGFXNHdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CQndOQ0FBUXg5Z1lxUzVCbndwTDIKcUdOeStjNnR0Yk15VmE3ZTdzT0J1UWdaWHBHZ1hNcGxOZHFhVy9lZjNZTjQwTk5RUnl2SWdWeTMzU0ZHSFJ0VgpqaXBpOXRZbG8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3Ckh3WURWUjBqQkJnd0ZvQVVyNlUvYVNMcm1ONkxiaDZHS1JJa3NoM1M1bEF3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJaEFPS2paZmxRVU11RnZldlFkYzg3ckxPcnhoNUtyUGhlQUtkY0Y4YWdielFJQWlCcHBmUGNMMFRoZ1g5UAptcCtOZHJqa1hvQU5SRTlEWVdIRUlRbDdubytDdWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZVRDQ0FSK2dBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFrTVNJd0lBWURWUVFEREJseWEyVXlMV05zCmFXVnVkQzFqWVVBeE5qYzNORFV6T1RNNE1CNFhEVEl6TURJeU5qSXpNalV6T0ZvWERUTXpNREl5TXpJek1qVXoKT0Zvd0pERWlNQ0FHQTFVRUF3d1pjbXRsTWkxamJHbGxiblF0WTJGQU1UWTNOelExTXprek9EQlpNQk1HQnlxRwpTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCQlhFZ0x2Z3JLV09KdkZtVnJhNEhyY05YdmNwN3JMN3VFNW1IcFpkCmJmT2xkRDRkVlJ4NjRxak9DeUNpc2Vsczk4WDJLSXlieGNSNkpnbFU2VXRoOU5xalFqQkFNQTRHQTFVZER3RUIKL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlN2cFQ5cEl1dVkzb3R1SG9ZcApFaVN5SGRMbVVEQUtCZ2dxaGtqT1BRUURBZ05JQURCRkFpRUF3R2xqWXUxZkJpMHZROFczbWxueXVDNUJqMlBBCm14Sm1uS3BVSG8ydjJBZ0NJRndiblM3ajROUGtCT2hzRjJBeFhEZlZzdExoRWpqbmhPRHlQek1kT01STQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpZblVvZ2U2bHVuNFN1WVVaN3VBQTVGYXV6blBaQzV2WHlpc1R2SVRjOXFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTWZZR0trdVFaOEtTOXFoamN2bk9yYld6TWxXdTN1N0RnYmtJR1Y2Um9GektaVFhhbWx2MwpuOTJEZU5EVFVFY3J5SUZjdDkwaFJoMGJWWTRxWXZiV0pRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to Secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. 
+Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +Error from server (Forbidden): serviceaccounts is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "serviceaccounts" in API group "" at the cluster scope Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "calico-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-fleet-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-impersonation-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io 
is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cattle-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "cis-operator-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "default" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-node-lease" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-public" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "kube-system" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group "rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "local" Error from server (Forbidden): clusterrolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "clusterrolebindings" in API group 
"rbac.authorization.k8s.io" at the cluster scope Error from server (Forbidden): rolebindings.rbac.authorization.k8s.io is forbidden: User "system:serviceaccount:cis-operator-system:cis-serviceaccount" cannot list resource "rolebindings" in API group "rbac.authorization.k8s.io" in the namespace "tigera-operator" true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +### 5.1.7 Avoid use of system:masters group (Manual) + + +**Result:** warn + +**Remediation:** +Remove the system:masters group from all users in the cluster. + +### 5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove the impersonate, bind and escalate rights from subjects. + +## 5.2 Pod Security Standards +### 5.2.1 Ensure that the cluster has at least one active policy control mechanism in place (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that either Pod Security Admission or an external policy control system is in place +for every namespace which contains user workloads. + +### 5.2.2 Minimize the admission of privileged containers (Manual) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of privileged containers. + +**Audit:** + +```bash +kubectl get psp global-restricted-psp -o json | jq -r '.spec.runAsUser.rule' +``` + +**Expected Result**: + +```console +'MustRunAsNonRoot' is present +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" +``` + +### 5.2.3 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostPID` containers. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.4 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** warn + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostIPC` containers. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +'count' is greater than 0 +``` + +**Returned Value**: + +```console +error: the server doesn't have a resource type "psp" --count=0 +``` + +### 5.2.5 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** fail + +**Remediation:** +Add policies to each namespace in the cluster which has user workloads to restrict the +admission of `hostNetwork` containers. 
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+'count' is greater than 0
+```
+
+**Returned Value**:
+
+```console
+error: the server doesn't have a resource type "psp" --count=0
+```
+
+### 5.2.6 Minimize the admission of containers with allowPrivilegeEscalation (Automated)
+
+
+**Result:** fail
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+'count' is greater than 0
+```
+
+**Returned Value**:
+
+```console
+error: the server doesn't have a resource type "psp" --count=0
+```
+
+### 5.2.7 Minimize the admission of root containers (Automated)
+
+
+**Result:** fail
+
+**Remediation:**
+Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+or `MustRunAs` with the range of UIDs not including 0, is set.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+'count' is greater than 0
+```
+
+**Returned Value**:
+
+```console
+error: the server doesn't have a resource type "psp" --count=0
+```
+
+### 5.2.8 Minimize the admission of containers with the NET_RAW capability (Automated)
+
+
+**Result:** fail
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with the `NET_RAW` capability.
+
+**Audit:**
+
+```bash
+kubectl get psp global-restricted-psp -o json | jq -r .spec.requiredDropCapabilities[]
+```
+
+**Expected Result**:
+
+```console
+'ALL' is present
+```
+
+**Returned Value**:
+
+```console
+error: the server doesn't have a resource type "psp"
+```
+
+### 5.2.9 Minimize the admission of containers with added capabilities (Automated)
+
+
+**Result:** warn
+
+**Remediation:**
+Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+it is set to an empty array.
+
+### 5.2.10 Minimize the admission of containers with capabilities assigned (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Review the use of capabilities in applications running on your cluster. Where a namespace
+contains applications which do not require any Linux capabilities to operate, consider adding
+a PSP which forbids the admission of containers which do not drop all capabilities.
+
+### 5.2.11 Minimize the admission of Windows HostProcess containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+
+### 5.2.12 Minimize the admission of HostPath volumes (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers with `hostPath` volumes.
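+
+The 5.2.x remediations above all rely on per-namespace policies. On clusters where the `psp` resource type is no longer available (as the returned values above indicate), one way to apply such a policy is to enforce the `restricted` Pod Security Standard on every namespace that runs user workloads. The commands below are a minimal sketch only, not part of the audited configuration; `my-app-namespace` is a placeholder namespace name.
+
+```bash
+# Sketch: enforce the "restricted" Pod Security Standard on a workload namespace.
+# Replace "my-app-namespace" with each namespace that contains user workloads.
+kubectl label namespace my-app-namespace \
+  pod-security.kubernetes.io/enforce=restricted \
+  pod-security.kubernetes.io/enforce-version=latest
+
+# Optionally audit and warn at the same level before enforcing.
+kubectl label namespace my-app-namespace \
+  pod-security.kubernetes.io/audit=restricted \
+  pod-security.kubernetes.io/warn=restricted
+```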
+
+### 5.2.13 Minimize the admission of containers which use HostPorts (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Add policies to each namespace in the cluster which has user workloads to restrict the
+admission of containers which use `hostPort` sections.
+
+## 5.3 Network Policies and CNI
+### 5.3.1 Ensure that the CNI in use supports Network Policies (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+If the CNI plugin in use does not support network policies, consideration should be given to
+making use of a different plugin, or finding an alternate mechanism for restricting traffic
+in the Kubernetes cluster.
+
+**Audit:**
+
+```bash
+kubectl get pods --all-namespaces --selector='k8s-app in (calico-node, canal, cilium)' -o name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+'count' is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated)
+
+
+**Result:** true
+
+**Remediation:**
+Follow the documentation and create NetworkPolicy objects as you need them.
+
+**Audit Script:** `check_for_rke2_network_policies.sh`
+
+```bash
+#!/bin/bash
+
+set -eE
+
+handle_error() {
+  echo "false"
+}
+
+trap 'handle_error' ERR
+
+for namespace in kube-system kube-public default; do
+  policy_count=$(/var/lib/rancher/rke2/bin/kubectl get networkpolicy -n ${namespace} -o json | jq -r '.items | length')
+  if [ ${policy_count} -eq 0 ]; then
+    echo "false"
+    exit
+  fi
+done
+
+echo "true"
+
+```
+
+**Audit Execution:**
+
+```bash
+./check_for_rke2_network_policies.sh
+```
+
+**Expected Result**:
+
+```console
+'true' is present
+```
+
+**Returned Value**:
+
+```console
+true
+```
+
+## 5.4 Secrets Management
+### 5.4.1 Prefer using Secrets as files over Secrets as environment variables (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If possible, rewrite application code to read Secrets from mounted secret files, rather than
+from environment variables.
+
+### 5.4.2 Consider external secret storage (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Refer to the Secrets management options offered by your cloud provider or a third-party
+secrets management solution.
+
+## 5.5 Extensible Admission Control
+### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and set up image provenance.
+
+## 5.7 General Policies
+### 5.7.1 Create administrative boundaries between resources using namespaces (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the documentation and create namespaces for objects in your deployment as you need
+them.
+
+### 5.7.2 Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+An example is shown below:
+securityContext:
+  seccompProfile:
+    type: RuntimeDefault
+
+### 5.7.3 Apply SecurityContext to your Pods and Containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+Containers.
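+
+To make the guidance in 5.7.2 and 5.7.3 concrete, the snippet below is a minimal, illustrative Pod definition that opts in to the `RuntimeDefault` seccomp profile and sets a restrictive `securityContext`. It is a sketch only: the pod name, namespace, and image are placeholders and should be replaced with your own workload details.
+
+```bash
+# Sketch: apply a Pod that uses the RuntimeDefault seccomp profile and
+# drops common privilege-escalation paths. All values are examples only.
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: seccomp-example
+  namespace: my-app-namespace
+spec:
+  securityContext:
+    runAsNonRoot: true
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+    - name: app
+      image: registry.example.com/app:latest
+      securityContext:
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+EOF
+```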
+ +### 5.7.4 The default namespace should not be used (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/security-advisories-and-cves.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/security-advisories-and-cves.md index b34a5f156269..c2baac88dcf0 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/security-advisories-and-cves.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/security-advisories-and-cves.md @@ -19,10 +19,10 @@ Rancher 致力于向社区披露我们产品的安全问题。我们会针对已 | [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | 在 Rancher 2.5.15 和 2.6.6 及之前的版本中发现了一个问题。授权逻辑缺陷允许在下游集群中通过集群角色模板绑定 (CRTB) 和项目角色模板绑定 (PRTB) 来提升权限。任何有权限创建/编辑 CRTB 或 PRTB 的用户(例如 `cluster-owner`、`manage cluster members`、`project-owner` 和 `manage project members`)都可以利用该漏洞,在同一集群的另一个项目或不同下游集群的另一个项目中获得所有者权限。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | 2.5.12 到 2.6.3 的 Rancher 版本无法正确清理集群模板 answer 中的凭证。此错误可能会导致明文存储以及凭证、密码和 API 令牌被暴露。在 Rancher 中,已认证的 `Cluster Owner`、`Cluster Member`、`Project Owner` 和 `Project Member` 可以在 `/v1/management.cattle.io.clusters`、`/v3/clusters` 和 `/k8s/clusters/local/apis/management.cattle.io/v3/clusters` 端点上看到暴露的凭证。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | | [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | 在 2.5.15 到 2.6.6 的 Rancher 版本中发现了一个问题,其中密码、API 密钥和 Rancher 的 ServiceAccount 令牌(用于配置集群)等敏感字段直接以明文形式存储在 `Cluster` 等 Kubernetes 对象上(例如,`cluster.management.cattle.io`)。任何能够读取 Kubernetes API 中的对象的用户都可以检索这些敏感数据的明文版本。该问题由 Florian Struck(来自 [Continum AG](https://www.continum.net/))和 [Marco Stuurman](https://github.com/fe-ax)(来自 [Shock Media B.V.](https://www.shockmedia.nl/))发现并报告。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) | -| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/)配置 [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | -| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | 在 Rancher 2.5.0 到 2.5.12 和 Rancher 2.6.0 到 2.6.3 中发现了一个漏洞,该漏洞允许能创建或更新[全局角色](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/)的用户将他们或其他用户升级为管理员。全局角色能授予用户 
Rancher 级别的权限,例如能创建集群。在已识别的 Rancher 版本中,如果用户被授予了编辑或创建全局角色的权限,他们不仅仅能授予他们已经拥有的权限。此漏洞影响使用能够创建或编辑全局角色的非管理员用户的客户。此场景最常见的用例是 `restricted-admin` 角色。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](../../pages-for-subheaders/about-rke1-templates.md)配置 [Weave](../../faq/container-network-interface-providers.md#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | +| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | 在 Rancher 2.5.0 到 2.5.12 和 Rancher 2.6.0 到 2.6.3 中发现了一个漏洞,该漏洞允许能创建或更新[全局角色](../../pages-for-subheaders/manage-role-based-access-control-rbac.md)的用户将他们或其他用户升级为管理员。全局角色能授予用户 Rancher 级别的权限,例如能创建集群。在已识别的 Rancher 版本中,如果用户被授予了编辑或创建全局角色的权限,他们不仅仅能授予他们已经拥有的权限。此漏洞影响使用能够创建或编辑全局角色的非管理员用户的客户。此场景最常见的用例是 `restricted-admin` 角色。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | 此漏洞仅影响在 Rancher 中使用 `restricted-admin` 角色的客户。在 Rancher 2.5.0 到 2.5.12 和 2.6.0 到 2.6.3 中发现了一个漏洞,其中 `cattle-global-data` 命名空间中的 `global-data` 角色授予了应用商店的写权限。由于具有任何级别的应用商店访问权限的用户都会绑定到 `global-data` 角色,因此这些用户都能写入模板 `CatalogTemplates`) 和模板版本 (`CatalogTemplateVersions`)。在 Rancher 中创建的新用户默认分配到 `user` 角色(普通用户),该角色本不该具有写入应用商店的权限。此漏洞提升了能写入应用商店模板和应用商店模板版本资源的用户的权限。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | 此漏洞仅影响使用经过认证的 Git 和/或 Helm 仓库通过 [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) 进行持续交付的客户。在 [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) 之前版本中的 `go-getter` 库中发现了一个问题,错误消息中没有删除 Base64 编码的 SSH 私钥,导致该信息暴露。Rancher 中 [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9) 之前的 Fleet 版本使用了该库的漏洞版本。此问题影响 Rancher 2.5.0 到 2.5.12(包括 2.5.12)以及 2.6.0 到 2.6.3(包括 2.6.3)。该问题由 Raft Engineering 的 Dagan Henderson 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | 此漏洞仅影响使用经过认证的 Git 和/或 Helm 仓库通过 [Fleet](../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) 进行持续交付的客户。在 [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) 之前版本中的 `go-getter` 库中发现了一个问题,错误消息中没有删除 Base64 编码的 SSH 私钥,导致该信息暴露。Rancher 中 [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9) 之前的 Fleet 版本使用了该库的漏洞版本。此问题影响 Rancher 2.5.0 到 2.5.12(包括 2.5.12)以及 2.6.0 到 2.6.3(包括 2.6.3)。该问题由 Raft Engineering 的 Dagan Henderson 发现并报告。 | 2022 年 4 月 14 日 | 
[Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | | [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | 在 Rancher 2.5.0 到 2.5.11 和 Rancher 2.6.0 到 2.6.2 中发现了一个漏洞,当从配置的私有仓库下载 Helm Chart 时,对同源策略的检查不足可能导致仓库凭证暴露给第三方提供商。仅当用户在 Rancher 的`应用 & 应用市场 > 仓库`中配置私有仓库的访问凭证时才会出现此问题。该问题由 Martin Andreas Ullrich 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) 和 [Rancher 2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | | [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | 在 Rancher 2.0 到 2.6.3 中发现了一个漏洞。Rancher 提供的 `restricted` Pod 安全策略(PSP)与 Kubernetes 提供的上游 `restricted` 策略有差别,因此 Rancher 的 PSP 将 `runAsUser` 设置为 `runAsAny`,而上游将 `runAsUser` 设置为 `MustRunAsNonRoot`。因此,即使 Rancher 的 `restricted` 策略是在项目或集群级别上强制执行的,容器也可以以任何用户身份运行,包括特权用户 (`root`)。 | 2022 年 3 月 31 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | | [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | 在 Rancher 2.4.17、2.5.11 和 2.6.2 以及更高的版本中发现了一个漏洞。从项目中删除与某个组关联的`项目角色`后,能让这些使用者访问集群级别资源的绑定(Binding)不会被删除。导致问题的原因是不完整的授权逻辑检查。如果用户是受影响组中的成员,且能对 Rancher 进行认证访问,那么用户可以利用此漏洞访问他们不应该能访问的资源。暴露级别取决于受影响项目角色的原始权限级别。此漏洞仅影响在 Rancher 中基于组进行身份验证的客户。 | 2022 年 3 月 31 日 | [Rancher 2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3)、[Rancher 2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) 和 [Rancher 2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/contribute-to-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/contribute-to-rancher.md index f63c6a38e301..baff7b735b9f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/contribute-to-rancher.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: 参与 Rancher 社区贡献 --- -本节介绍 Rancher 使用的仓库、如何构建仓库以及提交 issue 时要包含的信息。 +本文介绍了 Rancher 仓库和 Rancher 文档、如何构建 Rancher 仓库以及提交 issue 时要包含哪些信息。 有关如何为 Rancher 项目开发做出贡献的更多详细信息,请参阅 [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki)。Wiki 包含以下主题的资源: @@ -14,7 +14,15 @@ title: 参与 Rancher 社区贡献 在 Rancher Users Slack 上,开发者的频道是 **#developer**。 -## 仓库 +## Rancher 文档 + +如果你对此网站上的文档有建议,请在主 [Rancher 文档](https://github.com/rancher/rancher-docs)仓库中[提交 issue](https://github.com/rancher/rancher-docs/issues/new/choose)。此仓库包含 Rancher v2.0 及更高版本的文档。 + +有关贡献和构建 Rancher v2.x 文档仓库的更多详细信息,请参阅 [Rancher 文档 README](https://github.com/rancher/rancher-docs#readme)。 + +有关 Rancher v1.6 及更早版本的文档,请参阅 [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) 仓库,其中包含 https://rancher.com/docs/rancher/v1.6/en/ 的源文件。 + +## Rancher 仓库 所有仓库都位于我们的主要 GitHub 组织内。Rancher 使用了很多仓库,以下是部分主要仓库的描述: @@ -38,19 +46,19 @@ title: 参与 Rancher 社区贡献 ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
用于配置/管理 Kubernetes 集群的 Rancher 组件。 -## 构建 +### 构建 Rancher 仓库 每个仓库都应该有一个 Makefile,并且可以使用 `make` 命令进行构建。`make` 目标基于仓库中 `/scripts` 目录中的脚本,每个目标都使用 [Dapper](https://github.com/rancher/dapper) 在孤立的环境中运行。`Dockerfile.dapper` 将用于此操作,它包含了所需的所有构建工具。 默认目标是 `ci`,它将运行 `./scripts/validate`、`./scripts/build`、`./scripts/test ` 和 `./scripts/package`。生成的二进制文件将在 `./build/bin` 中,通常也打包在 Docker 镜像中。 -## Bug、Issue 和疑问 +### Rancher Bug、Issue 或疑问 如果你发现任何 bug 或问题,由于有人可能遇到过同样的问题,或者我们已经正在寻找解决方案,因此请先在[已报告 issue](https://github.com/rancher/rancher/issues) 中搜索。 如果找不到与你的问题相关的内容,请通过[提出 issue](https://github.com/rancher/rancher/issues/new) 与我们联系。与 Rancher 相关的仓库有很多,但请将 issue 提交到 Rancher 仓库中,这样能确保我们能看到这些 issue。如果你想就一个用例提出问题或询问其他用户,你可以在 [Rancher 论坛](https://forums.rancher.com)上发帖。 -### 提交 Issue 的检查清单 +#### 提交 Issue 的检查清单 提交问题时请遵循此清单,以便我们调查和解决问题。如果你能提供更多信息,我们就可以使用更多数据来确定导致问题的原因或发现更多相关的内容。 @@ -126,11 +134,3 @@ title: 参与 Rancher 社区贡献 - Docker Daemon 日志记录(可能并不全部存在,取决于操作系统) - `/var/log/docker.log` - **指标**:如果你遇到性能问题,请提供尽可能多的指标数据(文件或屏幕截图)来帮助我们确定问题。如果你遇到主机相关的问题,你可以提供 `top`、`free -m`、`df` 的输出,这些输出会显示进程/内存/磁盘的使用情况。 - -## 文档 - -如果你对我们的文档有修改意见,请在我们的文档仓库中提交 PR。 - -- [Rancher 2.x 文档仓库](https://github.com/rancher/docs):Rancher 2.x 所有文档都在这个仓库中。具体位于仓库的 `content` 文件夹中。 - -- [Rancher 1.x 文档仓库](https://github.com/rancher/rancher.github.io):Rancher 1.x 所有文档都在这个仓库中。具体位于仓库的 `rancher` 文件夹中。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md index 63ccf2d2934e..9762bf92bea4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md @@ -4,7 +4,7 @@ title: 配置树外 vSphere 云提供商 Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。 -本页介绍如何在启动集群后安装 Cloud Provider Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 +本文介绍了如何在设置集群后安装 Cloud Provider Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 ## 先决条件 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md index 7b26f4ebbc93..9c53d5d95a35 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md @@ -1,6 +1,7 @@ --- -title: 将 vSphere 树内卷迁移到 CSI +title: 将 vSphere 树内卷迁移到树外 --- + Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。 本页介绍如何从树内 vSphere 云提供商迁移到树外,以及如何在迁移后管理现有虚拟机。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md 
b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index cf35ca3d921f..f09d0d00c29a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -2,6 +2,7 @@ title: Kubernetes 镜像仓库和 Docker 镜像仓库 description: 了解 Docker 镜像仓库和 Kubernetes 镜像仓库、它们的用例以及如何在 Rancher UI 中使用私有镜像仓库 --- + 镜像仓库是 Kubernetes 密文(Secret),包含用于向[私有 Docker 镜像仓库](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)进行身份验证的凭证。 “Registry” 这个词可能有两种意思,可指代 Docker 或 Kubernetes 镜像仓库: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md index 55242b557886..91e6192ed488 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md @@ -7,7 +7,8 @@ description: Ingress 配置 在 Kubernetes v1.21 及更高版本中,NGINX Ingress Controller 不再默认运行在 hostNetwork 中。它改为将 hostPorts 用于端口 80 和端口 443,因此你可以将准入 Webhook 配置为只能通过 ClusterIP 访问。这确保了只能从集群内部访问 webhook。 -由于 controller 的这一更改,默认不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。 +由于 controller 的这一更改,默认 RKE1 配置不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。为此,请[编辑](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#使用-yaml-编辑集群)集群的 YAML 并按照[官方 RKE1 文档](https://rke.docs.rancher.com/config-options/add-ons/ingress-controllers#configuring-network-options)中的步骤操作。 + ::: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 541a5f0f0a3a..c13f8dbf75e6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -2,6 +2,7 @@ title: "四层和七层负载均衡" description: "Kubernetes 支持四层负载均衡和七层负载均衡。了解对不同 deployment 的支持" --- + Kubernetes 支持四层负载均衡和七层负载均衡。 ## 四层负载均衡器 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md index 91ddc280e7e3..e59d36c843d4 100644 --- 
a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,7 @@ title: "Kubernetes 持久存储:卷和存储类" description: "了解在 Kubernetes 中创建持久存储的两种方式:持久卷和存储类" --- + 在部署需要保​​留数据的应用时,你需要创建持久存储。持久存储允许你在运行应用的 pod 之外存储应用数据。即使运行应用的 pod 发生故障,这种存储方式也能让你保留应用数据。 本文假设你已了解 Kubernetes 的持久卷、持久卷声明和存储类的概念。如需更多信息,请参阅[存储的工作原理](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md index c349bcba4d91..5a6382f71b1b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,7 @@ --- title: 跨集群部署应用 --- + ### Fleet Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/enable-experimental-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/enable-experimental-features.md index d4bac212a625..07baf3efd106 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/enable-experimental-features.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,7 @@ --- title: 启用实验功能 --- + Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当你认为使用[不支持的存储类型](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)的好处大于使用未经测试的功能的风险时,你可能想要启用实验功能。为了让你能够试用这些默认关闭的功能,我们引入了功能开关(feature flag)。 实验功能可以通过以下三种方式启用: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/quick-start-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/quick-start-guides.md index 46359aab1ade..fd0013e07778 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/quick-start-guides.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,7 @@ --- title: Rancher 部署快速入门指南 --- + :::caution 本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](installation-and-upgrade.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/vsphere.md index 888b5d45a3e1..db206b47f965 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/vsphere.md @@ -2,6 +2,7 @@ title: 创建 vSphere 集群 description: 使用 Rancher 创建 vSphere 集群。集群可能包括具有不同属性的 VM 组,这些属性可用于细粒度控制节点的大小。 --- + import YouTube from '@site/src/components/YouTube' 你可以结合使用 Rancher 与 vSphere,从而在本地体验云环境的操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/contribute-to-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/contribute-to-rancher.md index f63c6a38e301..baff7b735b9f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/contribute-to-rancher.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/contribute-to-rancher.md @@ -2,7 +2,7 @@ title: 参与 Rancher 社区贡献 --- -本节介绍 Rancher 使用的仓库、如何构建仓库以及提交 issue 时要包含的信息。 +本文介绍了 Rancher 仓库和 Rancher 文档、如何构建 Rancher 仓库以及提交 issue 时要包含哪些信息。 有关如何为 Rancher 项目开发做出贡献的更多详细信息,请参阅 [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki)。Wiki 包含以下主题的资源: @@ -14,7 +14,15 @@ title: 参与 Rancher 社区贡献 在 Rancher Users Slack 上,开发者的频道是 **#developer**。 -## 仓库 +## Rancher 文档 + +如果你对此网站上的文档有建议,请在主 [Rancher 文档](https://github.com/rancher/rancher-docs)仓库中[提交 issue](https://github.com/rancher/rancher-docs/issues/new/choose)。此仓库包含 Rancher v2.0 及更高版本的文档。 + +有关贡献和构建 Rancher v2.x 文档仓库的更多详细信息,请参阅 [Rancher 文档 README](https://github.com/rancher/rancher-docs#readme)。 + +有关 Rancher v1.6 及更早版本的文档,请参阅 [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) 仓库,其中包含 https://rancher.com/docs/rancher/v1.6/en/ 的源文件。 + +## Rancher 仓库 所有仓库都位于我们的主要 GitHub 组织内。Rancher 使用了很多仓库,以下是部分主要仓库的描述: @@ -38,19 +46,19 @@ title: 参与 Rancher 社区贡献 ![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
用于配置/管理 Kubernetes 集群的 Rancher 组件。 -## 构建 +### 构建 Rancher 仓库 每个仓库都应该有一个 Makefile,并且可以使用 `make` 命令进行构建。`make` 目标基于仓库中 `/scripts` 目录中的脚本,每个目标都使用 [Dapper](https://github.com/rancher/dapper) 在孤立的环境中运行。`Dockerfile.dapper` 将用于此操作,它包含了所需的所有构建工具。 默认目标是 `ci`,它将运行 `./scripts/validate`、`./scripts/build`、`./scripts/test ` 和 `./scripts/package`。生成的二进制文件将在 `./build/bin` 中,通常也打包在 Docker 镜像中。 -## Bug、Issue 和疑问 +### Rancher Bug、Issue 或疑问 如果你发现任何 bug 或问题,由于有人可能遇到过同样的问题,或者我们已经正在寻找解决方案,因此请先在[已报告 issue](https://github.com/rancher/rancher/issues) 中搜索。 如果找不到与你的问题相关的内容,请通过[提出 issue](https://github.com/rancher/rancher/issues/new) 与我们联系。与 Rancher 相关的仓库有很多,但请将 issue 提交到 Rancher 仓库中,这样能确保我们能看到这些 issue。如果你想就一个用例提出问题或询问其他用户,你可以在 [Rancher 论坛](https://forums.rancher.com)上发帖。 -### 提交 Issue 的检查清单 +#### 提交 Issue 的检查清单 提交问题时请遵循此清单,以便我们调查和解决问题。如果你能提供更多信息,我们就可以使用更多数据来确定导致问题的原因或发现更多相关的内容。 @@ -126,11 +134,3 @@ title: 参与 Rancher 社区贡献 - Docker Daemon 日志记录(可能并不全部存在,取决于操作系统) - `/var/log/docker.log` - **指标**:如果你遇到性能问题,请提供尽可能多的指标数据(文件或屏幕截图)来帮助我们确定问题。如果你遇到主机相关的问题,你可以提供 `top`、`free -m`、`df` 的输出,这些输出会显示进程/内存/磁盘的使用情况。 - -## 文档 - -如果你对我们的文档有修改意见,请在我们的文档仓库中提交 PR。 - -- [Rancher 2.x 文档仓库](https://github.com/rancher/docs):Rancher 2.x 所有文档都在这个仓库中。具体位于仓库的 `content` 文件夹中。 - -- [Rancher 1.x 文档仓库](https://github.com/rancher/rancher.github.io):Rancher 1.x 所有文档都在这个仓库中。具体位于仓库的 `rancher` 文件夹中。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md index 541453845e10..814d38c5349e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -12,6 +12,6 @@ title: 安装 Rancher CIS Benchmark :::note -CIS Benchmark 4.0.0 及更高版本默认禁用 PSP。要在加固集群上安装 CIS Benchmark,在安装 Chart 之前将 values 中的 `global.psp.enabled` 设置为 `true`。 +如果你使用 Kubernetes v1.24 或更早版本,并且具有使用 [Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) (PSP) 加固的集群,则 CIS Benchmark 4.0.0 及更高版本会默认禁用 PSP。要在 PSP 加固集群上安装 CIS Benchmark,请在安装 Chart 之前将 values 中的 `global.psp.enabled` 设置为 `true`。[Pod 安全准入](../../new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md) (PSA) 加固集群不受影响。 ::: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md index b1f584639a09..f0461eb27b58 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md @@ -4,7 +4,7 @@ title: 配置树外 vSphere 云提供商 Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。 -本页介绍如何在启动集群后安装 Cloud Provider Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 +本文介绍了如何在设置集群后安装 Cloud Provider 
Interface (CPI) 和 Cloud Storage Interface (CSI) 插件。 ## 先决条件 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md index 6d0b408f1dd0..065ff2deb842 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md @@ -1,6 +1,7 @@ --- -title: 将 vSphere 树内卷迁移到 CSI +title: 将 vSphere 树内卷迁移到树外 --- + Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。 本页介绍如何从树内 vSphere 云提供商迁移到树外,以及如何在迁移后管理现有虚拟机。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md index 9d59762ad3be..6231ec1697a5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -2,6 +2,7 @@ title: Kubernetes 镜像仓库和容器镜像仓库 description: 了解容器镜像仓库和 Kubernetes 镜像仓库、它们的用例以及如何在 Rancher UI 中使用私有镜像仓库 --- + 镜像仓库是 Kubernetes 密文(Secret),包含用于向[私有容器镜像仓库](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)进行身份验证的凭证。 “Registry” 这个词可能有两种意思,可指代容器或 Kubernetes 镜像仓库: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md index a7e41c4488f5..380629411b41 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md @@ -7,7 +7,8 @@ description: Ingress 配置 在 Kubernetes v1.21 及更高版本中,NGINX Ingress Controller 不再默认运行在 hostNetwork 中。它改为将 hostPorts 用于端口 80 和端口 443,因此你可以将准入 Webhook 配置为只能通过 ClusterIP 访问。这确保了只能从集群内部访问 webhook。 -由于 controller 的这一更改,默认不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。 +由于 controller 的这一更改,默认 RKE1 配置不再将 `hostNetwork` 设置为 `true`。但是,你必须将 `hostNetwork` 设置为 `true` 才能使基于 TCP 和 UDP 的 Service 正常工作。为此,请[编辑](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#使用-yaml-编辑集群)集群的 YAML 并按照[官方 RKE1 文档](https://rke.docs.rancher.com/config-options/add-ons/ingress-controllers#configuring-network-options)中的步骤操作。 + ::: diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md index 541a5f0f0a3a..c13f8dbf75e6 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -2,6 +2,7 @@ title: "四层和七层负载均衡" description: "Kubernetes 支持四层负载均衡和七层负载均衡。了解对不同 deployment 的支持" --- + Kubernetes 支持四层负载均衡和七层负载均衡。 ## 四层负载均衡器 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md index 91ddc280e7e3..e59d36c843d4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -2,6 +2,7 @@ title: "Kubernetes 持久存储:卷和存储类" description: "了解在 Kubernetes 中创建持久存储的两种方式:持久卷和存储类" --- + 在部署需要保​​留数据的应用时,你需要创建持久存储。持久存储允许你在运行应用的 pod 之外存储应用数据。即使运行应用的 pod 发生故障,这种存储方式也能让你保留应用数据。 本文假设你已了解 Kubernetes 的持久卷、持久卷声明和存储类的概念。如需更多信息,请参阅[存储的工作原理](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md index c349bcba4d91..5a6382f71b1b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md @@ -1,6 +1,7 @@ --- title: 跨集群部署应用 --- + ### Fleet Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/enable-experimental-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/enable-experimental-features.md index 3c47fcf0cf17..4f759f616fec 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/enable-experimental-features.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/enable-experimental-features.md @@ -1,6 +1,7 @@ --- title: 启用实验功能 --- + Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当你认为使用[不支持的存储类型](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)的好处大于使用未经测试的功能的风险时,你可能想要启用实验功能。为了让你能够试用这些默认关闭的功能,我们引入了功能开关(feature flag)。 实验功能可以通过以下三种方式启用: diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/quick-start-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/quick-start-guides.md index 46359aab1ade..fd0013e07778 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/quick-start-guides.md +++ 
b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/quick-start-guides.md @@ -1,6 +1,7 @@ --- title: Rancher 部署快速入门指南 --- + :::caution 本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](installation-and-upgrade.md)。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-security.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-security.md index 542ddb1db768..15677f4a9a76 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-security.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-security.md @@ -22,9 +22,8 @@ title: 安全 安全是 Rancher 全部功能的基础。Rancher 集成了全部主流身份验证工具和服务,并提供了企业级的 [RBAC 功能](manage-role-based-access-control-rbac.md),让你的 Kubernetes 集群更加安全。 本文介绍了安全相关的文档以及资源,让你的 Rancher 安装和下游 Kubernetes 集群更加安全。 -### NeuVector 与 Rancher 的集成 -_2.6.5 的新功能_ +### NeuVector 与 Rancher 的集成 NeuVector 是一个开源的、以容器为中心的安全应用程序,现已集成到 Rancher 中。NeuVector 提供生产安全、DevOps 漏洞保护和容器防火墙等功能。请参阅 [Rancher 文档](../integrations-in-rancher/neuvector.md)和 [NeuVector 文档](https://open-docs.neuvector.com/)了解更多信息。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/vsphere.md index 888b5d45a3e1..db206b47f965 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/vsphere.md @@ -2,6 +2,7 @@ title: 创建 vSphere 集群 description: 使用 Rancher 创建 vSphere 集群。集群可能包括具有不同属性的 VM 组,这些属性可用于细粒度控制节点的大小。 --- + import YouTube from '@site/src/components/YouTube' 你可以结合使用 Rancher 与 vSphere,从而在本地体验云环境的操作。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md index 03c9137205ef..4cf442380040 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cli-with-rancher/rancher-cli.md @@ -10,7 +10,7 @@ Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher 你可以直接 UI 下载二进制文件。 1. 点击左上角的 **☰**。 -1. 单击底部的 **v2.6.x**,**v2.6.x** 是一个超链接文本,表示已安装的 Rancher 版本。 +1. 在导航侧边栏菜单底部,单击**简介**。 1. 
在 **CLI 下载**中,有 Windows、Mac 和 Linux 的二进制文件下载链接。你还可以访问我们的 CLI [发布页面](https://github.com/rancher/cli/releases)直接下载二进制文件。 ### 要求 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md index a8956d548d5e..7ac1c106d1d8 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md @@ -13,7 +13,7 @@ title: vSphere 节点模板配置 | 凭证字段 | 描述 | |-----------------|--------------| | vCenter 或 ESXi Server | 输入 vCenter 或 ESXi 主机名/IP。ESXi 是你创建和运行虚拟机和虚拟设备的虚拟化平台。你可以通过 vCenter Server 服务来管理网络中连接的多个主机并池化主机资源。 | -| 端口 | 可选:配置 vCenter 或 ESXi Server 的端口。 | +| 端口 | 配置 vCenter 或 ESXi Server 的端口。 | | 用户名和密码 | 你的 vSphere 登录用户名和密码。 | ## 调度 @@ -92,4 +92,4 @@ title: vSphere 节点模板配置 如果要配置 Red Hat Enterprise Linux (RHEL) 或 CentOS 节点,请将 **Docker Install URL** 字段保留为默认值,或选择 **none**。由于 Docker 已经安装在这些节点上,因此将绕过 Docker 安装检查。 如果没有将 **Docker Install URL** 设置为默认值或 **none**,你可能会看到错误消息:`Error creating machine: RHEL ssh command error: command: sudo -E yum install -y curl err: exit status 1 output: Updating Subscription Management repositories`。 -::: \ No newline at end of file +::: From 5cd6c930f18cd418a7da39e81eb41ceba9bee4c8 Mon Sep 17 00:00:00 2001 From: vickyhella Date: Fri, 8 Sep 2023 17:15:16 +0800 Subject: [PATCH 33/54] Fix broken links --- .../back-up-restore-usage-guide.md | 2 +- ...tore-rancher-launched-kubernetes-clusters-from-backup.md | 2 +- .../monitoring-and-alerting/built-in-dashboards.md | 2 +- docs/pages-for-subheaders/access-clusters.md | 2 +- .../authentication-permissions-and-global-configuration.md | 2 +- docs/pages-for-subheaders/monitoring-and-alerting.md | 2 +- .../communicating-with-downstream-user-clusters.md | 2 +- .../back-up-restore-usage-guide.md | 4 ++-- ...tore-rancher-launched-kubernetes-clusters-from-backup.md | 2 +- .../current/pages-for-subheaders/access-clusters.md | 2 +- .../authentication-permissions-and-global-configuration.md | 2 +- .../current/pages-for-subheaders/monitoring-and-alerting.md | 2 +- .../communicating-with-downstream-user-clusters.md | 2 +- .../back-up-restore-usage-guide.md | 4 ++-- .../communicating-with-downstream-user-clusters.md | 2 +- .../back-up-restore-usage-guide.md | 4 ++-- ...tore-rancher-launched-kubernetes-clusters-from-backup.md | 2 +- .../pages-for-subheaders/rancher-hardening-guides.md | 6 +++--- .../communicating-with-downstream-user-clusters.md | 2 +- .../communicating-with-downstream-user-clusters.md | 2 +- .../back-up-restore-usage-guide.md | 2 +- .../new-user-guides/deploy-apps-across-clusters/fleet.md | 2 +- .../communicating-with-downstream-user-clusters.md | 2 +- .../back-up-restore-usage-guide.md | 2 +- ...tore-rancher-launched-kubernetes-clusters-from-backup.md | 2 +- .../monitoring-and-alerting/built-in-dashboards.md | 2 +- .../version-2.7/pages-for-subheaders/access-clusters.md | 2 +- .../authentication-permissions-and-global-configuration.md | 2 +- .../pages-for-subheaders/monitoring-and-alerting.md | 2 +- .../communicating-with-downstream-user-clusters.md | 2 +- 30 files changed, 35 insertions(+), 35 
deletions(-) diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index 78579c62c46b..3ef56172eb12 100644 --- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -101,7 +101,7 @@ Below are some examples of some **incorrect** uses or expectations of Rancher Ba ### Upgrades -- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades), and then taking **another** backup after the upgrade is complete. This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. +- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md), and then taking **another** backup after the upgrade is complete. This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. - Using Rancher backups for upgrading Kubernetes versions is not a valid use case either. Because the Kubernetes API and available resources are tied to the version, upgrading using backup restore can lead to issues with misaligned sets of resources which may be deprecated, unsupported, or updated. How to upgrade your cluster version will depend on how it was provisioned however the same format as above is recommended (backup, upgrade, backup). ### ResourceSet diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md index 1d6407c2280e..313812b6129c 100644 --- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -8,7 +8,7 @@ title: Restoring a Cluster from Backup Etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. -Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. 
Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot-rke). Clusters can also be restored to a prior Kubernetes version and cluster configuration. diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index 83bf8987e16b..9a464ba3ede2 100644 --- a/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/docs/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -114,4 +114,4 @@ For more information on configuring PrometheusRules in Rancher, see [this page.] ## Legacy UI -For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). +For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). 
diff --git a/docs/pages-for-subheaders/access-clusters.md b/docs/pages-for-subheaders/access-clusters.md index 43ea26c3cab3..69d18515700f 100644 --- a/docs/pages-for-subheaders/access-clusters.md +++ b/docs/pages-for-subheaders/access-clusters.md @@ -29,7 +29,7 @@ You can also access the **Clusters** page by clicking the **Manage** button abov On the **Clusters** page, select **⁝** at the end of each row to view a submenu with the following options: -* [Kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) +* [Kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) * Download KubeConfig * Copy KubeConfig to Clipboard * Edit Config diff --git a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md index 98494a94cf90..509c7bdedec9 100644 --- a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -82,4 +82,4 @@ The following features are available under **Global Configuration**: - **Global DNS Entries** - **Global DNS Providers** -As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. \ No newline at end of file +As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. 
\ No newline at end of file diff --git a/docs/pages-for-subheaders/monitoring-and-alerting.md b/docs/pages-for-subheaders/monitoring-and-alerting.md index d9d00e1aa4eb..e76dba53edbc 100644 --- a/docs/pages-for-subheaders/monitoring-and-alerting.md +++ b/docs/pages-for-subheaders/monitoring-and-alerting.md @@ -11,7 +11,7 @@ The `rancher-monitoring` application can quickly deploy leading open-source moni Introduced in Rancher v2.5, the application is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) -For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../versioned_docs/v2.0-v2.4/explanations/integrations-in-rancher/notifiers) and other [tools](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/project-tools.md). +For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md) and other [tools](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md). Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..61a87f558d8e 100644 --- a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ The GitHub repositories for Rancher can be found at the following links: - [Rancher CLI](https://github.com/rancher/cli) - [Catalog applications](https://github.com/rancher/helm) -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#rancher-repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
\ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index bff8c6ac3104..03b4a7041851 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -22,7 +22,7 @@ Rancher Backups Chart 是我们的灾难恢复和迁移解决方案。此 Chart ### 还原 -有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#在还原期间修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 +有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#还原过程中修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 资源按以下顺序还原: @@ -97,7 +97,7 @@ Rancher Backups Chart 包含了一个[默认 resourceSet](https://github.com/ran ### 升级 -- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 +- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 - 使用 Rancher Backups 来升级 Kubernetes 版本也不是一个有效用法。由于 Kubernetes API 以及可用资源与版本相关,因此使用备份还原的方法来进行升级可能会导致资源集不对齐的问题,这些资源可能已被弃用、不受支持或已更新。升级集群版本的方式取决于其配置方式,但建议使用上述的流程(备份、升级、备份)。 ### ResourceSet diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md index c92705018984..97e23f814460 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -4,7 +4,7 @@ title: 使用备份恢复集群 你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。 -Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcd)。 +Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcdrke)。 集群也可以恢复到之前的 Kubernetes 版本和集群配置。 diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md index 4860f8a2a830..72cd215bb474 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/access-clusters.md @@ -25,7 +25,7 @@ title: 集群访问 在**集群**页面上,选择每行末尾的 **⁝** 以查看包含以下选项的子菜单: -* [Kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) +* [Kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) * 下载 KubeConfig * 将 KubeConfig 复制到剪切板 * 编辑配置 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md index ff9a4f49504f..88ed5f387516 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -78,4 +78,4 @@ Rancher 包含一些实验性或默认禁用的功能。你可以使用功能开 - **全局 DNS 条目** - **全局 DNS 提供商** -由于这些是旧版功能,因此请参阅有关[应用商店](/versioned_docs/v2.0-v2.4/pages-for-subheaders/helm-charts-in-rancher.md)、[全局 DNS 条目](/versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry)和[全局 DNS 提供商](/versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider)的 Rancher v2.0-v2.4 文档了解更多详情。 \ No newline at end of file +由于这些是旧版功能,因此请参阅有关[应用商店](/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md)、[全局 DNS 条目](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry)和[全局 DNS 提供商](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider)的 Rancher v2.0-v2.4 文档了解更多详情。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md index 11597dd034dc..84d29ee6ac01 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/monitoring-and-alerting.md @@ -7,7 +7,7 @@ description: Prometheus 允许你查看来自不同 Rancher 和 Kubernetes 对 该应用程序在 Rancher v2.5 中引入,由 [Prometheus](https://prometheus.io/)、[Grafana](https://grafana.com/grafana/)、[Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/)、[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 和 [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) 提供支持。 -有关 Rancher v2.2 至 v2.4 中 V1 monitoring 和 alerting 的信息,请参阅有关[集群监控](/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.m)、[告警](/versioned_docs/v2.0-v2.4/pages-for-subheaders/cluster-alerts.md)、[notifiers](/versioned_docs/v2.0-v2.4/explanations/integrations-in-rancher/notifiers) 和其他[工具](/versioned_docs/v2.0-v2.4/pages-for-subheaders/project-tools.md)的 Rancher v2.0-v2.4 文档。 +有关 Rancher v2.2 至 v2.4 中 V1 
monitoring 和 alerting 的信息,请参阅有关[集群监控](/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md)、[告警](/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md)、[notifiers](/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md) 和其他[工具](/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md)的 Rancher v2.0-v2.4 文档。 你可以使用 `rancher-monitoring` 应用,将业界领先的开源监控和告警解决方案快速部署到你的集群中。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index 353d90a66611..56800763102e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ Rancher 的 GitHub 代码仓库如下: - [Rancher CLI](https://github.com/rancher/cli) - [应用商店](https://github.com/rancher/helm) -以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file +以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#rancher-仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index bff8c6ac3104..03b4a7041851 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -22,7 +22,7 @@ Rancher Backups Chart 是我们的灾难恢复和迁移解决方案。此 Chart ### 还原 -有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#在还原期间修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 +有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#还原过程中修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 资源按以下顺序还原: @@ -97,7 +97,7 @@ Rancher Backups Chart 包含了一个[默认 resourceSet](https://github.com/ran ### 升级 -- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 +- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 - 使用 Rancher Backups 来升级 
Kubernetes 版本也不是一个有效用法。由于 Kubernetes API 以及可用资源与版本相关,因此使用备份还原的方法来进行升级可能会导致资源集不对齐的问题,这些资源可能已被弃用、不受支持或已更新。升级集群版本的方式取决于其配置方式,但建议使用上述的流程(备份、升级、备份)。 ### ResourceSet diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index 353d90a66611..56800763102e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ Rancher 的 GitHub 代码仓库如下: - [Rancher CLI](https://github.com/rancher/cli) - [应用商店](https://github.com/rancher/helm) -以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file +以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#rancher-仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index bff8c6ac3104..03b4a7041851 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -22,7 +22,7 @@ Rancher Backups Chart 是我们的灾难恢复和迁移解决方案。此 Chart ### 还原 -有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#在还原期间修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 +有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#还原过程中修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。 资源按以下顺序还原: @@ -97,7 +97,7 @@ Rancher Backups Chart 包含了一个[默认 resourceSet](https://github.com/ran ### 升级 -- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 +- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。 - 使用 Rancher Backups 来升级 Kubernetes 版本也不是一个有效用法。由于 Kubernetes API 以及可用资源与版本相关,因此使用备份还原的方法来进行升级可能会导致资源集不对齐的问题,这些资源可能已被弃用、不受支持或已更新。升级集群版本的方式取决于其配置方式,但建议使用上述的流程(备份、升级、备份)。 ### ResourceSet diff --git 
a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md index c92705018984..97e23f814460 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -4,7 +4,7 @@ title: 使用备份恢复集群 你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。 -Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcd)。 +Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcdrke)。 集群也可以恢复到之前的 Kubernetes 版本和集群配置。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md index fcbbf594054c..48386cdd8712 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/rancher-hardening-guides.md @@ -24,7 +24,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |--------------------|-----------------------|-----------------------|------------------| | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke1-hardening-guide.md) | | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke1-hardening-guide.md) | -| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | +| Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke1-hardening-guide.md) | ### RKE2 Guides @@ -32,7 +32,7 @@ Each self-assessment guide is accompanied by a hardening guide. 
These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned RKE2 | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](rke2-hardening-guide.md) | | Rancher provisioned RKE2 | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](rke2-hardening-guide.md) | -| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | +| Rancher provisioned RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](rke2-hardening-guide.md) | | Standalone RKE2 | Kubernetes v1.25 | CIS v1.23 | [Link](https://docs.rke2.io/security/cis_self_assessment123) | [Link](https://docs.rke2.io/security/hardening_guide) | ### K3s Guides @@ -41,7 +41,7 @@ Each self-assessment guide is accompanied by a hardening guide. These guides wer |------|--------------------|-----------------------|-----------------------|------------------| | Rancher provisioned K3s cluster | Kubernetes v1.23 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.23.md) | [Link](k3s-hardening-guide.md) | | Rancher provisioned K3s cluster | Kubernetes v1.24 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.24.md) | [Link](k3s-hardening-guide.md) | -| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | +| Rancher provisioned K3s cluster | Kubernetes v1.25 | CIS v1.23 | [Link](../reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md) | [Link](k3s-hardening-guide.md) | | Standalone K3s | Kubernetes v1.22 up to v1.24 | CIS v1.23 | [Link](https://docs.k3s.io/security/self-assessment) | [Link](https://docs.k3s.io/security/hardening-guide) | ## Rancher with SELinux diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index 353d90a66611..56800763102e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ Rancher 的 GitHub 代码仓库如下: - [Rancher CLI](https://github.com/rancher/cli) - [应用商店](https://github.com/rancher/helm) -以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 
文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file +以上仅列出部分 Rancher 最重要的仓库。详情请参见[参与 Rancher 开源贡献](../../contribute-to-rancher.md#rancher-仓库)。如需获取 Rancher 使用的所有库和项目,请参见 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。 \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index aee16457d711..b676e11d6eed 100644 --- a/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ The GitHub repositories for Rancher can be found at the following links: - [Rancher CLI](https://github.com/rancher/cli) - [Catalog applications](https://github.com/rancher/helm) -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#rancher-repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index 78579c62c46b..3ef56172eb12 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -101,7 +101,7 @@ Below are some examples of some **incorrect** uses or expectations of Rancher Ba ### Upgrades -- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades), and then taking **another** backup after the upgrade is complete. This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. +- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md), and then taking **another** backup after the upgrade is complete. 
This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. - Using Rancher backups for upgrading Kubernetes versions is not a valid use case either. Because the Kubernetes API and available resources are tied to the version, upgrading using backup restore can lead to issues with misaligned sets of resources which may be deprecated, unsupported, or updated. How to upgrade your cluster version will depend on how it was provisioned however the same format as above is recommended (backup, upgrade, backup). ### ResourceSet diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md index 8ff8e4cd5bef..975231b10900 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md @@ -59,7 +59,7 @@ The Helm chart in the git repository must include its dependencies in the charts ## Troubleshooting --- -* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. +* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backups-operator). We will update the community once a permanent solution is in place. * **Temporary Workaround:**
By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify resourceSet to include extra resources you want to backup, refer to docs [here](https://github.com/rancher/backup-restore-operator#user-flow). diff --git a/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..61a87f558d8e 100644 --- a/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ The GitHub repositories for Rancher can be found at the following links: - [Rancher CLI](https://github.com/rancher/cli) - [Catalog applications](https://github.com/rancher/helm) -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#rancher-repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md index 78579c62c46b..3ef56172eb12 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md @@ -101,7 +101,7 @@ Below are some examples of some **incorrect** uses or expectations of Rancher Ba ### Upgrades -- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades), and then taking **another** backup after the upgrade is complete. This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. +- Using Rancher backups for upgrading Rancher versions is not a valid use case. The recommended procedure is to take a backup of the current version, then upgrade your Rancher instance using [these instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md), and then taking **another** backup after the upgrade is complete. 
This way if the upgrade fails you have a backup to restore to, and the second backup will be valid to restore to the upgraded Rancher version. - Using Rancher backups for upgrading Kubernetes versions is not a valid use case either. Because the Kubernetes API and available resources are tied to the version, upgrading using backup restore can lead to issues with misaligned sets of resources which may be deprecated, unsupported, or updated. How to upgrade your cluster version will depend on how it was provisioned however the same format as above is recommended (backup, upgrade, backup). ### ResourceSet diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md index 1d6407c2280e..313812b6129c 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -8,7 +8,7 @@ title: Restoring a Cluster from Backup Etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. -Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot-rke). Clusters can also be restored to a prior Kubernetes version and cluster configuration. diff --git a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md index 3d1096a8bed3..6186565900b1 100644 --- a/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md +++ b/versioned_docs/version-2.7/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -114,4 +114,4 @@ For more information on configuring PrometheusRules in Rancher, see [this page.] 
## Legacy UI -For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). \ No newline at end of file +For information on the dashboards available in v2.2 to v2.4 of Rancher, before the introduction of the `rancher-monitoring` application, see the [Rancher v2.0—v2.4 docs](../../../version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md). \ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md index b1c72ddc266a..b31664726f9e 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/access-clusters.md @@ -16,7 +16,7 @@ For information on how to set up an authentication system, see [this section.](a On the **Clusters** page, select **⁝** at the end of each row to view a submenu with the following options: -* [Kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) +* [Kubectl Shell](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) * Download KubeConfig * Copy KubeConfig to Clipboard * Edit Config diff --git a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md index 98494a94cf90..ae1a7e2fdd4b 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -82,4 +82,4 @@ The following features are available under **Global Configuration**: - **Global DNS Entries** - **Global DNS Providers** -As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../versioned_docs/v2.0-v2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. \ No newline at end of file +As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](../../version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md), [global DNS entries](../../version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](../../version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details. 
\ No newline at end of file diff --git a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md index d9d00e1aa4eb..a4d38ca86dd5 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/monitoring-and-alerting.md @@ -11,7 +11,7 @@ The `rancher-monitoring` application can quickly deploy leading open-source moni Introduced in Rancher v2.5, the application is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) -For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../versioned_docs/v2.0-v2.4/explanations/integrations-in-rancher/notifiers) and other [tools](../../versioned_docs/v2.0-v2.4/pages-for-subheaders/project-tools.md). +For information on V1 monitoring and alerting, available in Rancher v2.2 up to v2.4, please see the Rancher v2.0—v2.4 docs on [cluster monitoring](../../version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md), [alerting](../../version-2.0-2.4/pages-for-subheaders/cluster-alerts.md), [notifiers](../../version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md) and other [tools](../../version-2.0-2.4/pages-for-subheaders/project-tools.md). Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. diff --git a/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..61a87f558d8e 100644 --- a/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -129,4 +129,4 @@ The GitHub repositories for Rancher can be found at the following links: - [Rancher CLI](https://github.com/rancher/cli) - [Catalog applications](https://github.com/rancher/helm) -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. \ No newline at end of file +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../../contribute-to-rancher.md#rancher-repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
\ No newline at end of file From d5e9e0d11169c7ce1b4694c77931650f215c97f5 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 8 Sep 2023 10:37:00 -0400 Subject: [PATCH 34/54] #748 Update info about which K8s distros can enable ACE (#749) * 748 Update info about which K8s distros can enable ACE * rephrase, typo fix, links, acronyms * avilable > available * Rancher, not Kubernetes * Apply suggestions from code review Co-authored-by: Brad Davidson Co-authored-by: Billy Tat * added missing 2.6 page * ace available on imported clusters as well as rancher-provisioned --------- Co-authored-by: Brad Davidson Co-authored-by: Billy Tat --- .../rke1-cluster-configuration.md | 6 +++--- .../communicating-with-downstream-user-clusters.md | 5 +++-- .../rke1-cluster-configuration.md | 6 +++--- .../communicating-with-downstream-user-clusters.md | 5 +++-- .../rke1-cluster-configuration.md | 6 +++--- .../communicating-with-downstream-user-clusters.md | 4 ++-- 6 files changed, 17 insertions(+), 15 deletions(-) diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md index 43d52dc866e9..94c3b2c34423 100644 --- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -119,15 +119,15 @@ Rancher v2.6 introduced the ability to configure [ECR registries for RKE cluster ### Authorized Cluster Endpoint -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. +Authorized Cluster Endpoint (ACE) can be used to directly access the Kubernetes API server, without requiring communication through Rancher. :::note -The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. +ACE is available on RKE, RKE2, and K3s clusters that are provisioned or registered with Rancher. It's not available on clusters in a hosted Kubernetes provider, such as Amazon's EKS. ::: -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. +ACE must be set up [manually](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters) on RKE2 and K3s clusters. In RKE, ACE is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self-signed certificates.
For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) diff --git a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..72b3ed3f8148 100644 --- a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -56,9 +56,10 @@ The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/do ### 4. Authorized Cluster Endpoint -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. +An authorized cluster endpoint (ACE) allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. + +> ACE is available on RKE, RKE2, and K3s clusters that are provisioned or registered with Rancher. It's not available on clusters in a hosted Kubernetes provider, such as Amazon's EKS. -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. There are two main reasons why a user might need the authorized cluster endpoint: diff --git a/versioned_docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/versioned_docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md index 3e66b99e6f81..0b6e10c248ce 100644 --- a/versioned_docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md +++ b/versioned_docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -119,15 +119,15 @@ Rancher v2.6 introduced the ability to configure [ECR registries for RKE cluster ### Authorized Cluster Endpoint -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. +Authorized Cluster Endpoint (ACE) can be used to directly access the Kubernetes API server, without requiring communication through Rancher. :::note -The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. +ACE is available on Kubernetes clusters provisioned by or registered with Rancher. In Rancher v2.6.3 and later, RKE, RKE2, and K3s all support ACE. Prior to Rancher v2.6.3, it was only available for RKE. 
Regardless of version, ACE isn't available on clusters in a hosted Kubernetes provider, such as Amazon's EKS. ::: -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. +ACE must be set up [manually](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters) on RKE2 and K3s clusters in Rancher v2.6.3 and later. In RKE, ACE is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self-signed certificates. For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) diff --git a/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..72b3ed3f8148 100644 --- a/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/versioned_docs/version-2.6/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -56,9 +56,10 @@ The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/do ### 4. Authorized Cluster Endpoint -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. +An authorized cluster endpoint (ACE) allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. + +> ACE is available on RKE, RKE2, and K3s clusters that are provisioned or registered with Rancher. It's not available on clusters in a hosted Kubernetes provider, such as Amazon's EKS. -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. There are two main reasons why a user might need the authorized cluster endpoint: diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md index 43d52dc866e9..0d03db56bb13 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -119,15 +119,15 @@ Rancher v2.6 introduced the ability to configure [ECR registries for RKE cluster ### Authorized Cluster Endpoint -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher.
+Authorized Cluster Endpoint (ACE) can be used to directly access the Kubernetes API server, without requiring communication through Rancher. :::note -The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. +ACE is available on RKE, RKE2, and K3s clusters that are provisioned or registered with Rancher. It's not available on clusters in a hosted Kubernetes provider, such as Amazon's EKS. ::: -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. +ACE must be set up [manually](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters) on RKE2 and K3s clusters. In RKE, ACE is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self-signed certificates. For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) diff --git a/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md index c79e1b8c27e3..9441bdcea543 100644 --- a/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md +++ b/versioned_docs/version-2.7/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -56,9 +56,9 @@ The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/do ### 4. Authorized Cluster Endpoint -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. +An authorized cluster endpoint (ACE) allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. +> ACE is available on RKE, RKE2, and K3s clusters that are provisioned or registered with Rancher. It's not available on clusters in a hosted Kubernetes provider, such as Amazon's EKS.
There are two main reasons why a user might need the authorized cluster endpoint: From a2952cd469a50ff4df80dc96033b9855ede77839 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 8 Sep 2023 15:35:01 -0400 Subject: [PATCH 35/54] #420 canonical links for Troubleshooting (#835) * canonicized troubleshooting/kubernetes-components * canonicized troubleshooting/other-troubleshooting-tips * canonicized troubleshooting * files for 2.0-2.4, and 2.5 --- docs/troubleshooting/general-troubleshooting.md | 4 ++++ .../troubleshooting-controlplane-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-etcd-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-nginx-proxy.md | 4 ++++ .../troubleshooting-worker-nodes-and-generic-components.md | 4 ++++ docs/troubleshooting/other-troubleshooting-tips/dns.md | 4 ++++ .../expired-webhook-certificate-rotation.md | 4 ++++ .../other-troubleshooting-tips/kubernetes-resources.md | 4 ++++ docs/troubleshooting/other-troubleshooting-tips/logging.md | 4 ++++ docs/troubleshooting/other-troubleshooting-tips/networking.md | 4 ++++ docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md | 4 ++++ .../other-troubleshooting-tips/registered-clusters.md | 4 ++++ .../user-id-tracking-in-audit-logs.md | 4 ++++ versioned_docs/version-2.0-2.4/troubleshooting.md | 4 ++++ .../troubleshooting-controlplane-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-etcd-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-nginx-proxy.md | 4 ++++ .../troubleshooting-worker-nodes-and-generic-components.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/dns.md | 4 ++++ .../other-troubleshooting-tips/kubernetes-resources.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/logging.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/networking.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/rancher-ha.md | 4 ++++ .../other-troubleshooting-tips/registered-clusters.md | 4 ++++ versioned_docs/version-2.5/troubleshooting.md | 4 ++++ .../troubleshooting-controlplane-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-etcd-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-nginx-proxy.md | 4 ++++ .../troubleshooting-worker-nodes-and-generic-components.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/dns.md | 4 ++++ .../expired-webhook-certificate-rotation.md | 4 ++++ .../other-troubleshooting-tips/kubernetes-resources.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/logging.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/networking.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/rancher-ha.md | 4 ++++ .../other-troubleshooting-tips/registered-clusters.md | 4 ++++ .../version-2.6/troubleshooting/general-troubleshooting.md | 4 ++++ .../troubleshooting-controlplane-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-etcd-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-nginx-proxy.md | 4 ++++ .../troubleshooting-worker-nodes-and-generic-components.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/dns.md | 4 ++++ .../expired-webhook-certificate-rotation.md | 4 ++++ .../other-troubleshooting-tips/kubernetes-resources.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/logging.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/networking.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/rancher-ha.md | 4 ++++ .../other-troubleshooting-tips/registered-clusters.md | 4 ++++ .../user-id-tracking-in-audit-logs.md | 4 ++++ 
.../version-2.7/troubleshooting/general-troubleshooting.md | 4 ++++ .../troubleshooting-controlplane-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-etcd-nodes.md | 4 ++++ .../kubernetes-components/troubleshooting-nginx-proxy.md | 4 ++++ .../troubleshooting-worker-nodes-and-generic-components.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/dns.md | 4 ++++ .../expired-webhook-certificate-rotation.md | 4 ++++ .../other-troubleshooting-tips/kubernetes-resources.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/logging.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/networking.md | 4 ++++ .../troubleshooting/other-troubleshooting-tips/rancher-ha.md | 4 ++++ .../other-troubleshooting-tips/registered-clusters.md | 4 ++++ .../user-id-tracking-in-audit-logs.md | 4 ++++ 62 files changed, 248 insertions(+) diff --git a/docs/troubleshooting/general-troubleshooting.md b/docs/troubleshooting/general-troubleshooting.md index 4b28735357ba..77ab7e248061 100644 --- a/docs/troubleshooting/general-troubleshooting.md +++ b/docs/troubleshooting/general-troubleshooting.md @@ -2,6 +2,10 @@ title: General Troubleshooting --- + + + + This section contains information to help you troubleshoot issues when using Rancher. - [Kubernetes components](../pages-for-subheaders/kubernetes-components.md) diff --git a/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 2d352dd7bc54..84877bc8fab0 100644 --- a/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md +++ b/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting Controlplane Nodes --- + + + + This section applies to nodes with the `controlplane` role. ## Check if the Controlplane Containers are Running diff --git a/docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md b/docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md index 10936b96d628..9785d8e4f680 100644 --- a/docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md +++ b/docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting etcd Nodes --- + + + + This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md b/docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md index 08d71c4c0286..6c381ee5bb0a 100644 --- a/docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md +++ b/docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md @@ -2,6 +2,10 @@ title: Troubleshooting nginx-proxy --- + + + + The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. 
## Check if the Container is Running diff --git a/docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md b/docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md index 66c36f68bb73..5a5de6c01563 100644 --- a/docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md +++ b/docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md @@ -2,6 +2,10 @@ title: Troubleshooting Worker Nodes and Generic Components --- + + + + This section applies to every node as it includes components that run on nodes with any role. ## Check if the Containers are Running diff --git a/docs/troubleshooting/other-troubleshooting-tips/dns.md b/docs/troubleshooting/other-troubleshooting-tips/dns.md index 2d8c9218ab98..af1108b6f80b 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/dns.md +++ b/docs/troubleshooting/other-troubleshooting-tips/dns.md @@ -2,6 +2,10 @@ title: DNS --- + + + + The commands/steps listed on this page can be used to check name resolution issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md index 72b643e8a7aa..106479c0bb71 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md +++ b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md @@ -2,6 +2,10 @@ title: Rotation of Expired Webhook Certificates --- + + + + For Rancher versions that have `rancher-webhook` installed, certain versions created certificates that will expire after one year. It will be necessary for you to rotate your webhook certificate if the certificate did not renew. In Rancher v2.6.3 and up, rancher-webhook deployments will automatically renew their TLS certificate when it is within 30 or fewer days of its expiration date. If you are using v2.6.2 or below, there are two methods to work around this issue: diff --git a/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index 788c5fce5732..f1e6b8f8594d 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md +++ b/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
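Before running any of the resource checks on this page, a minimal sketch for confirming that kubectl is actually talking to the cluster you intend to debug; the kubeconfig path below is the Rancher HA example from the text and may differ in your setup.

```bash
# Point kubectl at the cluster (Rancher HA example path from this page)
export KUBECONFIG=$PWD/kube_config_cluster.yml

kubectl config current-context   # should name the cluster you expect
kubectl get nodes -o wide        # node list should match that cluster
```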
diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index 1c539e92c1dc..d4d6353442e1 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -2,6 +2,10 @@ title: Logging --- + + + + ## Log levels The following log levels are used in Rancher: diff --git a/docs/troubleshooting/other-troubleshooting-tips/networking.md b/docs/troubleshooting/other-troubleshooting-tips/networking.md index 5ac7962fd00a..bea850ffe13f 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/networking.md +++ b/docs/troubleshooting/other-troubleshooting-tips/networking.md @@ -2,6 +2,10 @@ title: Networking --- + + + + The commands/steps listed on this page can be used to check networking related issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md index 6bfef988bd44..8917f80da4da 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md +++ b/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md @@ -2,6 +2,10 @@ title: Rancher HA --- + + + + The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`). diff --git a/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md index 6416333cd09e..cce0e089621f 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md +++ b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md @@ -2,6 +2,10 @@ title: Registered Clusters --- + + + + The commands/steps listed on this page can be used to check clusters that you are registering or that are registered in Rancher. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) diff --git a/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md index fe2d380f853e..6a25ae1565e9 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md +++ b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md @@ -2,6 +2,10 @@ title: User ID Tracking in Audit Logs --- + + + + The following audit logs are used in Rancher to track events occuring on the local and downstream clusters: * [Kubernetes Audit Logs](https://rancher.com/docs/rke/latest/en/config-options/audit-log/) diff --git a/versioned_docs/version-2.0-2.4/troubleshooting.md b/versioned_docs/version-2.0-2.4/troubleshooting.md index 07ae43668ace..fbee6cef33c9 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting.md @@ -2,6 +2,10 @@ title: Troubleshooting --- + + + + This section contains information to help you troubleshoot issues when using Rancher. 
- [Kubernetes components](pages-for-subheaders/kubernetes-components.md) diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 3680d702ecb4..890acddcaa43 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting Controlplane Nodes --- + + + + This section applies to nodes with the `controlplane` role. ## Check if the Controlplane Containers are Running diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md index aaf9fcc493d4..0317b885def3 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting etcd Nodes --- + + + + This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md index 08d71c4c0286..6c381ee5bb0a 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md @@ -2,6 +2,10 @@ title: Troubleshooting nginx-proxy --- + + + + The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. ## Check if the Container is Running diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md index 66c36f68bb73..5a5de6c01563 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md @@ -2,6 +2,10 @@ title: Troubleshooting Worker Nodes and Generic Components --- + + + + This section applies to every node as it includes components that run on nodes with any role. ## Check if the Containers are Running diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md index c3accc96b208..31e342dc21e1 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md @@ -2,6 +2,10 @@ title: DNS --- + + + + The commands/steps listed on this page can be used to check name resolution issues in your cluster. 
Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index cbc265b8b20b..b18d96407900 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -2,6 +2,10 @@ title: Kubernetes resources --- + + + + The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md index 0d0ab290de57..be78487ddfc2 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md @@ -2,6 +2,10 @@ title: Logging --- + + + + The following log levels are used in Rancher: | Name | Description | diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md index 5e9656d09bbf..f1bffa50c6d0 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md @@ -2,6 +2,10 @@ title: Networking --- + + + + The commands/steps listed on this page can be used to check networking related issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md index f1db2d953291..3ce3d3881bc4 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md @@ -2,6 +2,10 @@ title: Rancher HA --- + + + + The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml`). 
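As a starting point, a hedged sketch of the first things to look at in a Rancher HA installation. It assumes the Helm chart defaults (namespace `cattle-system`, label `app=rancher`); adjust if your install differs.

```bash
export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml

# All Rancher pods should be Running and Ready (assumes chart defaults)
kubectl -n cattle-system get pods -l app=rancher -o wide

# Recent Rancher server logs, useful for spotting startup or leader-election errors
kubectl -n cattle-system logs -l app=rancher --tail=100
```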
diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md index 063c35a38b63..0df5c0aa0f84 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md @@ -2,6 +2,10 @@ title: Imported clusters --- + + + + The commands/steps listed on this page can be used to check clusters that you are importing or that are imported in Rancher. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) diff --git a/versioned_docs/version-2.5/troubleshooting.md b/versioned_docs/version-2.5/troubleshooting.md index 426109d6a3a7..c2a257d54ca7 100644 --- a/versioned_docs/version-2.5/troubleshooting.md +++ b/versioned_docs/version-2.5/troubleshooting.md @@ -2,6 +2,10 @@ title: Troubleshooting --- + + + + This section contains information to help you troubleshoot issues when using Rancher. - [Kubernetes components](pages-for-subheaders/kubernetes-components.md) diff --git a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 3680d702ecb4..890acddcaa43 100644 --- a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md +++ b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting Controlplane Nodes --- + + + + This section applies to nodes with the `controlplane` role. ## Check if the Controlplane Containers are Running diff --git a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md index aaf9fcc493d4..0317b885def3 100644 --- a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md +++ b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting etcd Nodes --- + + + + This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md index 08d71c4c0286..6c381ee5bb0a 100644 --- a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md +++ b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md @@ -2,6 +2,10 @@ title: Troubleshooting nginx-proxy --- + + + + The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. 
## Check if the Container is Running diff --git a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md index 66c36f68bb73..5a5de6c01563 100644 --- a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md +++ b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md @@ -2,6 +2,10 @@ title: Troubleshooting Worker Nodes and Generic Components --- + + + + This section applies to every node as it includes components that run on nodes with any role. ## Check if the Containers are Running diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md index 5d12cb313cc7..255df3440166 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md @@ -2,6 +2,10 @@ title: DNS --- + + + + The commands/steps listed on this page can be used to check name resolution issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md index bd090b57a55a..601d062984df 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md @@ -2,6 +2,10 @@ title: Rotation of Expired Webhook Certificates --- + + + + For Rancher versions that have `rancher-webhook` installed, certain versions created certificates that will expire after one year. It will be necessary for you to rotate your webhook certificate if the certificate did not renew. In Rancher v2.5.12 and up, rancher-webhook deployments will automatically renew their TLS certificate when it is within 30 or fewer days of its expiration date. If you are using v2.5.11 or below, there are two methods to work around this issue: diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index ce5fd3c661c3..6682b27d5089 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -2,6 +2,10 @@ title: Kubernetes resources --- + + + + The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
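Before drilling into individual resources, a quick sketch of the broad checks this page expands on: node health, unhealthy pods, and recent cluster events.

```bash
kubectl get nodes -o wide                                            # node status, versions, addresses
kubectl get pods --all-namespaces | grep -v 'Running\|Completed'     # anything not healthy
kubectl get events --all-namespaces --sort-by='.lastTimestamp' | tail -n 20   # latest events
```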
diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md index 3cf2967d7e3d..04930414ce28 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md @@ -2,6 +2,10 @@ title: Logging --- + + + + The following log levels are used in Rancher: | Name | Description | diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md index 629a18bb1279..9427ae11f9e2 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md @@ -2,6 +2,10 @@ title: Networking --- + + + + The commands/steps listed on this page can be used to check networking related issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md index 6bfef988bd44..8917f80da4da 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md @@ -2,6 +2,10 @@ title: Rancher HA --- + + + + The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`). diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md index fcea8fb66341..b8a354cf0fee 100644 --- a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md @@ -2,6 +2,10 @@ title: Registered clusters --- + + + + The commands/steps listed on this page can be used to check clusters that you are registering or that are registered in Rancher. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) diff --git a/versioned_docs/version-2.6/troubleshooting/general-troubleshooting.md b/versioned_docs/version-2.6/troubleshooting/general-troubleshooting.md index 4b28735357ba..77ab7e248061 100644 --- a/versioned_docs/version-2.6/troubleshooting/general-troubleshooting.md +++ b/versioned_docs/version-2.6/troubleshooting/general-troubleshooting.md @@ -2,6 +2,10 @@ title: General Troubleshooting --- + + + + This section contains information to help you troubleshoot issues when using Rancher. 
- [Kubernetes components](../pages-for-subheaders/kubernetes-components.md) diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 2d352dd7bc54..84877bc8fab0 100644 --- a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting Controlplane Nodes --- + + + + This section applies to nodes with the `controlplane` role. ## Check if the Controlplane Containers are Running diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md index aaf9fcc493d4..0317b885def3 100644 --- a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting etcd Nodes --- + + + + This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md index 08d71c4c0286..6c381ee5bb0a 100644 --- a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md @@ -2,6 +2,10 @@ title: Troubleshooting nginx-proxy --- + + + + The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. ## Check if the Container is Running diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md index 66c36f68bb73..5a5de6c01563 100644 --- a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md @@ -2,6 +2,10 @@ title: Troubleshooting Worker Nodes and Generic Components --- + + + + This section applies to every node as it includes components that run on nodes with any role. ## Check if the Containers are Running diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md index 2d8c9218ab98..af1108b6f80b 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/dns.md @@ -2,6 +2,10 @@ title: DNS --- + + + + The commands/steps listed on this page can be used to check name resolution issues in your cluster. 
Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md index 72b643e8a7aa..106479c0bb71 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md @@ -2,6 +2,10 @@ title: Rotation of Expired Webhook Certificates --- + + + + For Rancher versions that have `rancher-webhook` installed, certain versions created certificates that will expire after one year. It will be necessary for you to rotate your webhook certificate if the certificate did not renew. In Rancher v2.6.3 and up, rancher-webhook deployments will automatically renew their TLS certificate when it is within 30 or fewer days of its expiration date. If you are using v2.6.2 or below, there are two methods to work around this issue: diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index 788c5fce5732..f1e6b8f8594d 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/logging.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/logging.md index 1c539e92c1dc..d4d6353442e1 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/logging.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/logging.md @@ -2,6 +2,10 @@ title: Logging --- + + + + ## Log levels The following log levels are used in Rancher: diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md index 5ac7962fd00a..bea850ffe13f 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md @@ -2,6 +2,10 @@ title: Networking --- + + + + The commands/steps listed on this page can be used to check networking related issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
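One of the most common checks here is cross-node pod connectivity over the overlay network. The sketch below is a minimal, assumption-laden version: the pod names, namespace, and IP are placeholders, and the chosen pod image must include a `ping` binary.

```bash
# List pods with their IPs and the nodes they run on, then pick two pods
# that are scheduled on DIFFERENT nodes.
kubectl get pods --all-namespaces -o wide

# From inside the first pod, ping the second pod's IP across the overlay.
# <namespace>, <pod-on-node-1>, and <ip-of-pod-on-node-2> are placeholders;
# the pod image must ship a ping binary for this to work.
kubectl -n <namespace> exec <pod-on-node-1> -- ping -c 3 <ip-of-pod-on-node-2>
```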
diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/rancher-ha.md index 6bfef988bd44..8917f80da4da 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/rancher-ha.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/rancher-ha.md @@ -2,6 +2,10 @@ title: Rancher HA --- + + + + The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`). diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/registered-clusters.md index 6416333cd09e..cce0e089621f 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/registered-clusters.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/registered-clusters.md @@ -2,6 +2,10 @@ title: Registered Clusters --- + + + + The commands/steps listed on this page can be used to check clusters that you are registering or that are registered in Rancher. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) diff --git a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md index fe2d380f853e..6a25ae1565e9 100644 --- a/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md +++ b/versioned_docs/version-2.6/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md @@ -2,6 +2,10 @@ title: User ID Tracking in Audit Logs --- + + + + The following audit logs are used in Rancher to track events occuring on the local and downstream clusters: * [Kubernetes Audit Logs](https://rancher.com/docs/rke/latest/en/config-options/audit-log/) diff --git a/versioned_docs/version-2.7/troubleshooting/general-troubleshooting.md b/versioned_docs/version-2.7/troubleshooting/general-troubleshooting.md index 4b28735357ba..77ab7e248061 100644 --- a/versioned_docs/version-2.7/troubleshooting/general-troubleshooting.md +++ b/versioned_docs/version-2.7/troubleshooting/general-troubleshooting.md @@ -2,6 +2,10 @@ title: General Troubleshooting --- + + + + This section contains information to help you troubleshoot issues when using Rancher. - [Kubernetes components](../pages-for-subheaders/kubernetes-components.md) diff --git a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 2d352dd7bc54..84877bc8fab0 100644 --- a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md +++ b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting Controlplane Nodes --- + + + + This section applies to nodes with the `controlplane` role. 
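A condensed, hedged sketch of the checks described in this section, assuming RKE's default container names (`kube-apiserver`, `kube-controller-manager`, `kube-scheduler`); run it directly on the controlplane node.

```bash
# Are the controlplane containers present and running?
# (multiple --filter name= values are OR'ed by docker ps)
docker ps -a \
  --filter "name=kube-apiserver" \
  --filter "name=kube-controller-manager" \
  --filter "name=kube-scheduler"

# Inspect recent logs; repeat for the other two components as needed
docker logs --tail 30 kube-apiserver
```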
## Check if the Controlplane Containers are Running diff --git a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md index 10936b96d628..9785d8e4f680 100644 --- a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md +++ b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md @@ -2,6 +2,10 @@ title: Troubleshooting etcd Nodes --- + + + + This section contains commands and tips for troubleshooting nodes with the `etcd` role. diff --git a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md index 08d71c4c0286..6c381ee5bb0a 100644 --- a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md +++ b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md @@ -2,6 +2,10 @@ title: Troubleshooting nginx-proxy --- + + + + The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. ## Check if the Container is Running diff --git a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md index 66c36f68bb73..5a5de6c01563 100644 --- a/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md +++ b/versioned_docs/version-2.7/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md @@ -2,6 +2,10 @@ title: Troubleshooting Worker Nodes and Generic Components --- + + + + This section applies to every node as it includes components that run on nodes with any role. ## Check if the Containers are Running diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md index 2d8c9218ab98..af1108b6f80b 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/dns.md @@ -2,6 +2,10 @@ title: DNS --- + + + + The commands/steps listed on this page can be used to check name resolution issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
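A quick way to exercise in-cluster name resolution is a disposable pod, sketched below; `busybox:1.28` is an assumption here (newer busybox images format `nslookup` output differently).

```bash
# Internal service name resolution
kubectl run -it --rm --restart=Never dnstest --image=busybox:1.28 -- \
  nslookup kubernetes.default

# External name resolution (checks upstream DNS reachability from pods)
kubectl run -it --rm --restart=Never dnstest --image=busybox:1.28 -- \
  nslookup www.google.com
```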
diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md index 72b643e8a7aa..106479c0bb71 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md @@ -2,6 +2,10 @@ title: Rotation of Expired Webhook Certificates --- + + + + For Rancher versions that have `rancher-webhook` installed, certain versions created certificates that will expire after one year. It will be necessary for you to rotate your webhook certificate if the certificate did not renew. In Rancher v2.6.3 and up, rancher-webhook deployments will automatically renew their TLS certificate when it is within 30 or fewer days of its expiration date. If you are using v2.6.2 or below, there are two methods to work around this issue: diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index 788c5fce5732..f1e6b8f8594d 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -2,6 +2,10 @@ title: Kubernetes Resources --- + + + + The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/logging.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/logging.md index 1c539e92c1dc..d4d6353442e1 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/logging.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/logging.md @@ -2,6 +2,10 @@ title: Logging --- + + + + ## Log levels The following log levels are used in Rancher: diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md index 5ac7962fd00a..bea850ffe13f 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md @@ -2,6 +2,10 @@ title: Networking --- + + + + The commands/steps listed on this page can be used to check networking related issues in your cluster. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/rancher-ha.md index 6bfef988bd44..8917f80da4da 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/rancher-ha.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/rancher-ha.md @@ -2,6 +2,10 @@ title: Rancher HA --- + + + + The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`). diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/registered-clusters.md index 6416333cd09e..cce0e089621f 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/registered-clusters.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/registered-clusters.md @@ -2,6 +2,10 @@ title: Registered Clusters --- + + + + The commands/steps listed on this page can be used to check clusters that you are registering or that are registered in Rancher. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`) diff --git a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md index fe2d380f853e..6a25ae1565e9 100644 --- a/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md +++ b/versioned_docs/version-2.7/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md @@ -2,6 +2,10 @@ title: User ID Tracking in Audit Logs --- + + + + The following audit logs are used in Rancher to track events occuring on the local and downstream clusters: * [Kubernetes Audit Logs](https://rancher.com/docs/rke/latest/en/config-options/audit-log/) From 82668462be1e481bb1d8a237bf44ae0a44a5e068 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 8 Sep 2023 16:35:41 -0400 Subject: [PATCH 36/54] #420 canonical links for FAQ (#836) * #420 canonical links for FAQ * missing files for 2.0-2.4, 2.5 --- docs/faq/container-network-interface-providers.md | 4 ++++ docs/faq/deprecated-features-in-v2.5.md | 4 ++++ docs/faq/dockershim.md | 4 ++++ docs/faq/general-faq.md | 4 ++++ docs/faq/install-and-configure-kubectl.md | 4 ++++ docs/faq/rancher-is-no-longer-needed.md | 4 ++++ docs/faq/security.md | 4 ++++ docs/faq/technical-items.md | 4 ++++ docs/faq/telemetry.md | 4 ++++ versioned_docs/version-2.0-2.4/faq.md | 4 ++++ .../faq/container-network-interface-providers.md | 4 ++++ .../version-2.0-2.4/faq/install-and-configure-kubectl.md | 4 ++++ .../version-2.0-2.4/faq/rancher-is-no-longer-needed.md | 4 ++++ versioned_docs/version-2.0-2.4/faq/security.md | 4 ++++ versioned_docs/version-2.0-2.4/faq/technical-items.md | 4 ++++ versioned_docs/version-2.0-2.4/faq/telemetry.md | 4 ++++ versioned_docs/version-2.5/faq.md | 4 ++++ .../version-2.5/faq/container-network-interface-providers.md | 4 ++++ versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md | 4 ++++ .../version-2.5/faq/install-and-configure-kubectl.md | 4 ++++ versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md | 4 ++++ 
versioned_docs/version-2.5/faq/security.md | 4 ++++ versioned_docs/version-2.5/faq/technical-items.md | 4 ++++ versioned_docs/version-2.5/faq/telemetry.md | 4 ++++ .../version-2.6/faq/container-network-interface-providers.md | 4 ++++ versioned_docs/version-2.6/faq/deprecated-features-in-v2.5.md | 4 ++++ versioned_docs/version-2.6/faq/dockershim.md | 4 ++++ versioned_docs/version-2.6/faq/general-faq.md | 4 ++++ .../version-2.6/faq/install-and-configure-kubectl.md | 4 ++++ versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md | 4 ++++ versioned_docs/version-2.6/faq/security.md | 4 ++++ versioned_docs/version-2.6/faq/technical-items.md | 4 ++++ versioned_docs/version-2.6/faq/telemetry.md | 4 ++++ .../version-2.7/faq/container-network-interface-providers.md | 4 ++++ versioned_docs/version-2.7/faq/deprecated-features-in-v2.5.md | 4 ++++ versioned_docs/version-2.7/faq/dockershim.md | 4 ++++ versioned_docs/version-2.7/faq/general-faq.md | 4 ++++ .../version-2.7/faq/install-and-configure-kubectl.md | 4 ++++ versioned_docs/version-2.7/faq/rancher-is-no-longer-needed.md | 4 ++++ versioned_docs/version-2.7/faq/security.md | 4 ++++ versioned_docs/version-2.7/faq/technical-items.md | 4 ++++ versioned_docs/version-2.7/faq/telemetry.md | 4 ++++ 42 files changed, 168 insertions(+) diff --git a/docs/faq/container-network-interface-providers.md b/docs/faq/container-network-interface-providers.md index b741c84f3370..490713c7b5a7 100644 --- a/docs/faq/container-network-interface-providers.md +++ b/docs/faq/container-network-interface-providers.md @@ -3,6 +3,10 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you --- + + + + ## What is CNI? CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. diff --git a/docs/faq/deprecated-features-in-v2.5.md b/docs/faq/deprecated-features-in-v2.5.md index d901807423b4..86313d482385 100644 --- a/docs/faq/deprecated-features-in-v2.5.md +++ b/docs/faq/deprecated-features-in-v2.5.md @@ -2,6 +2,10 @@ title: Deprecated Features in Rancher --- + + + + ### What is Rancher's Deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). diff --git a/docs/faq/dockershim.md b/docs/faq/dockershim.md index 0921678a728c..23b982f7b305 100644 --- a/docs/faq/dockershim.md +++ b/docs/faq/dockershim.md @@ -2,6 +2,10 @@ title: Dockershim --- + + + + The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. 
We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. diff --git a/docs/faq/general-faq.md b/docs/faq/general-faq.md index 142c684f9a0e..417875a6c6c1 100644 --- a/docs/faq/general-faq.md +++ b/docs/faq/general-faq.md @@ -2,6 +2,10 @@ title: General FAQ --- + + + + This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. See [Technical FAQ](technical-items.md), for frequently asked technical questions. diff --git a/docs/faq/install-and-configure-kubectl.md b/docs/faq/install-and-configure-kubectl.md index 861e1b7b1eb1..9bcb56bc3d28 100644 --- a/docs/faq/install-and-configure-kubectl.md +++ b/docs/faq/install-and-configure-kubectl.md @@ -2,6 +2,10 @@ title: Installing and Configuring kubectl --- + + + + `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. ### Installation diff --git a/docs/faq/rancher-is-no-longer-needed.md b/docs/faq/rancher-is-no-longer-needed.md index f3b430d5646c..ce33edd6aece 100644 --- a/docs/faq/rancher-is-no-longer-needed.md +++ b/docs/faq/rancher-is-no-longer-needed.md @@ -2,6 +2,10 @@ title: Rancher is No Longer Needed --- + + + + This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. diff --git a/docs/faq/security.md b/docs/faq/security.md index 14fdc2e5a776..447fb53d7de6 100644 --- a/docs/faq/security.md +++ b/docs/faq/security.md @@ -3,6 +3,10 @@ title: Security --- + + + + **Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/docs/faq/technical-items.md b/docs/faq/technical-items.md index c860dac95ccb..0aed55b5d0db 100644 --- a/docs/faq/technical-items.md +++ b/docs/faq/technical-items.md @@ -2,6 +2,10 @@ title: Technical --- + + + + ### How can I reset the administrator password? Docker Install: diff --git a/docs/faq/telemetry.md b/docs/faq/telemetry.md index f0866ac2b559..a510347e5564 100644 --- a/docs/faq/telemetry.md +++ b/docs/faq/telemetry.md @@ -2,6 +2,10 @@ title: Telemetry --- + + + + ### What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. diff --git a/versioned_docs/version-2.0-2.4/faq.md b/versioned_docs/version-2.0-2.4/faq.md index 7307f18ba255..31c63d146f58 100644 --- a/versioned_docs/version-2.0-2.4/faq.md +++ b/versioned_docs/version-2.0-2.4/faq.md @@ -2,6 +2,10 @@ title: FAQ --- + + + + This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions. 
diff --git a/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md index 372290f8a9d4..9081a7f4ae8f 100644 --- a/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md +++ b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md @@ -3,6 +3,10 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you --- + + + + ## What is CNI? CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. diff --git a/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md index f059edb284dd..9623812941bf 100644 --- a/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md @@ -2,6 +2,10 @@ title: Installing and Configuring kubectl --- + + + + `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. ### Installation diff --git a/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md index 082324b73901..aac48b44baae 100644 --- a/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md @@ -2,6 +2,10 @@ title: Rancher is No Longer Needed --- + + + + This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. diff --git a/versioned_docs/version-2.0-2.4/faq/security.md b/versioned_docs/version-2.0-2.4/faq/security.md index e86c80236720..805dd1471921 100644 --- a/versioned_docs/version-2.0-2.4/faq/security.md +++ b/versioned_docs/version-2.0-2.4/faq/security.md @@ -2,6 +2,10 @@ title: Security --- + + + + **Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.0-2.4/faq/technical-items.md b/versioned_docs/version-2.0-2.4/faq/technical-items.md index 909f2cbfa46f..851253790465 100644 --- a/versioned_docs/version-2.0-2.4/faq/technical-items.md +++ b/versioned_docs/version-2.0-2.4/faq/technical-items.md @@ -2,6 +2,10 @@ title: Technical --- + + + + ### How can I reset the administrator password? Docker Install: diff --git a/versioned_docs/version-2.0-2.4/faq/telemetry.md b/versioned_docs/version-2.0-2.4/faq/telemetry.md index f0866ac2b559..a510347e5564 100644 --- a/versioned_docs/version-2.0-2.4/faq/telemetry.md +++ b/versioned_docs/version-2.0-2.4/faq/telemetry.md @@ -2,6 +2,10 @@ title: Telemetry --- + + + + ### What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. 
This information is used by Rancher Labs to help make the product better and is not shared with third-parties. diff --git a/versioned_docs/version-2.5/faq.md b/versioned_docs/version-2.5/faq.md index 3f778a9e7fdd..b10b3f8daf7e 100644 --- a/versioned_docs/version-2.5/faq.md +++ b/versioned_docs/version-2.5/faq.md @@ -2,6 +2,10 @@ title: FAQ --- + + + + This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions. diff --git a/versioned_docs/version-2.5/faq/container-network-interface-providers.md b/versioned_docs/version-2.5/faq/container-network-interface-providers.md index 0b9c8a56f6fa..e66d3a1dfe57 100644 --- a/versioned_docs/version-2.5/faq/container-network-interface-providers.md +++ b/versioned_docs/version-2.5/faq/container-network-interface-providers.md @@ -3,6 +3,10 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you --- + + + + ## What is CNI? CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. diff --git a/versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md b/versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md index 7f15d9798148..dba2fd3493ae 100644 --- a/versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md +++ b/versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md @@ -2,6 +2,10 @@ title: Deprecated Features in Rancher v2.5 --- + + + + ### What is Rancher's Deprecation policy? Starting in Rancher 2.5 we have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). diff --git a/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md index 6d1a86cae76e..2c4820540a99 100644 --- a/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md @@ -2,6 +2,10 @@ title: Installing and Configuring kubectl --- + + + + `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. ### Installation diff --git a/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md index a39c26f7b684..386241a0e7ce 100644 --- a/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md @@ -2,6 +2,10 @@ title: Rancher is No Longer Needed --- + + + + This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. 
diff --git a/versioned_docs/version-2.5/faq/security.md b/versioned_docs/version-2.5/faq/security.md index e86c80236720..805dd1471921 100644 --- a/versioned_docs/version-2.5/faq/security.md +++ b/versioned_docs/version-2.5/faq/security.md @@ -2,6 +2,10 @@ title: Security --- + + + + **Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.5/faq/technical-items.md b/versioned_docs/version-2.5/faq/technical-items.md index 7ee7c8c113fb..7d3491a6c6f5 100644 --- a/versioned_docs/version-2.5/faq/technical-items.md +++ b/versioned_docs/version-2.5/faq/technical-items.md @@ -2,6 +2,10 @@ title: Technical --- + + + + ### How can I reset the administrator password? Docker Install: diff --git a/versioned_docs/version-2.5/faq/telemetry.md b/versioned_docs/version-2.5/faq/telemetry.md index f0866ac2b559..a510347e5564 100644 --- a/versioned_docs/version-2.5/faq/telemetry.md +++ b/versioned_docs/version-2.5/faq/telemetry.md @@ -2,6 +2,10 @@ title: Telemetry --- + + + + ### What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. diff --git a/versioned_docs/version-2.6/faq/container-network-interface-providers.md b/versioned_docs/version-2.6/faq/container-network-interface-providers.md index b741c84f3370..490713c7b5a7 100644 --- a/versioned_docs/version-2.6/faq/container-network-interface-providers.md +++ b/versioned_docs/version-2.6/faq/container-network-interface-providers.md @@ -3,6 +3,10 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you --- + + + + ## What is CNI? CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. diff --git a/versioned_docs/version-2.6/faq/deprecated-features-in-v2.5.md b/versioned_docs/version-2.6/faq/deprecated-features-in-v2.5.md index d901807423b4..86313d482385 100644 --- a/versioned_docs/version-2.6/faq/deprecated-features-in-v2.5.md +++ b/versioned_docs/version-2.6/faq/deprecated-features-in-v2.5.md @@ -2,6 +2,10 @@ title: Deprecated Features in Rancher --- + + + + ### What is Rancher's Deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). diff --git a/versioned_docs/version-2.6/faq/dockershim.md b/versioned_docs/version-2.6/faq/dockershim.md index 0921678a728c..23b982f7b305 100644 --- a/versioned_docs/version-2.6/faq/dockershim.md +++ b/versioned_docs/version-2.6/faq/dockershim.md @@ -2,6 +2,10 @@ title: Dockershim --- + + + + The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. 
For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. diff --git a/versioned_docs/version-2.6/faq/general-faq.md b/versioned_docs/version-2.6/faq/general-faq.md index 91b604a45ec1..c41c70a5bc25 100644 --- a/versioned_docs/version-2.6/faq/general-faq.md +++ b/versioned_docs/version-2.6/faq/general-faq.md @@ -2,6 +2,10 @@ title: General FAQ --- + + + + This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. See [Technical FAQ](technical-items.md), for frequently asked technical questions. diff --git a/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md index 861e1b7b1eb1..9bcb56bc3d28 100644 --- a/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.6/faq/install-and-configure-kubectl.md @@ -2,6 +2,10 @@ title: Installing and Configuring kubectl --- + + + + `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. ### Installation diff --git a/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md index 6fa05b7566b7..7c74b3946b68 100644 --- a/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.6/faq/rancher-is-no-longer-needed.md @@ -2,6 +2,10 @@ title: Rancher is No Longer Needed --- + + + + This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. diff --git a/versioned_docs/version-2.6/faq/security.md b/versioned_docs/version-2.6/faq/security.md index 14fdc2e5a776..447fb53d7de6 100644 --- a/versioned_docs/version-2.6/faq/security.md +++ b/versioned_docs/version-2.6/faq/security.md @@ -3,6 +3,10 @@ title: Security --- + + + + **Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.6/faq/technical-items.md b/versioned_docs/version-2.6/faq/technical-items.md index c860dac95ccb..0aed55b5d0db 100644 --- a/versioned_docs/version-2.6/faq/technical-items.md +++ b/versioned_docs/version-2.6/faq/technical-items.md @@ -2,6 +2,10 @@ title: Technical --- + + + + ### How can I reset the administrator password? Docker Install: diff --git a/versioned_docs/version-2.6/faq/telemetry.md b/versioned_docs/version-2.6/faq/telemetry.md index f0866ac2b559..a510347e5564 100644 --- a/versioned_docs/version-2.6/faq/telemetry.md +++ b/versioned_docs/version-2.6/faq/telemetry.md @@ -2,6 +2,10 @@ title: Telemetry --- + + + + ### What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. 
This information is used by Rancher Labs to help make the product better and is not shared with third-parties. diff --git a/versioned_docs/version-2.7/faq/container-network-interface-providers.md b/versioned_docs/version-2.7/faq/container-network-interface-providers.md index b741c84f3370..490713c7b5a7 100644 --- a/versioned_docs/version-2.7/faq/container-network-interface-providers.md +++ b/versioned_docs/version-2.7/faq/container-network-interface-providers.md @@ -3,6 +3,10 @@ title: Container Network Interface (CNI) Providers description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you --- + + + + ## What is CNI? CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. diff --git a/versioned_docs/version-2.7/faq/deprecated-features-in-v2.5.md b/versioned_docs/version-2.7/faq/deprecated-features-in-v2.5.md index d901807423b4..86313d482385 100644 --- a/versioned_docs/version-2.7/faq/deprecated-features-in-v2.5.md +++ b/versioned_docs/version-2.7/faq/deprecated-features-in-v2.5.md @@ -2,6 +2,10 @@ title: Deprecated Features in Rancher --- + + + + ### What is Rancher's Deprecation policy? We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). diff --git a/versioned_docs/version-2.7/faq/dockershim.md b/versioned_docs/version-2.7/faq/dockershim.md index 0921678a728c..23b982f7b305 100644 --- a/versioned_docs/version-2.7/faq/dockershim.md +++ b/versioned_docs/version-2.7/faq/dockershim.md @@ -2,6 +2,10 @@ title: Dockershim --- + + + + The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. diff --git a/versioned_docs/version-2.7/faq/general-faq.md b/versioned_docs/version-2.7/faq/general-faq.md index 142c684f9a0e..417875a6c6c1 100644 --- a/versioned_docs/version-2.7/faq/general-faq.md +++ b/versioned_docs/version-2.7/faq/general-faq.md @@ -2,6 +2,10 @@ title: General FAQ --- + + + + This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. See [Technical FAQ](technical-items.md), for frequently asked technical questions. 
diff --git a/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md b/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md index 861e1b7b1eb1..9bcb56bc3d28 100644 --- a/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md +++ b/versioned_docs/version-2.7/faq/install-and-configure-kubectl.md @@ -2,6 +2,10 @@ title: Installing and Configuring kubectl --- + + + + `kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. ### Installation diff --git a/versioned_docs/version-2.7/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.7/faq/rancher-is-no-longer-needed.md index f3b430d5646c..ce33edd6aece 100644 --- a/versioned_docs/version-2.7/faq/rancher-is-no-longer-needed.md +++ b/versioned_docs/version-2.7/faq/rancher-is-no-longer-needed.md @@ -2,6 +2,10 @@ title: Rancher is No Longer Needed --- + + + + This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. diff --git a/versioned_docs/version-2.7/faq/security.md b/versioned_docs/version-2.7/faq/security.md index 14fdc2e5a776..447fb53d7de6 100644 --- a/versioned_docs/version-2.7/faq/security.md +++ b/versioned_docs/version-2.7/faq/security.md @@ -3,6 +3,10 @@ title: Security --- + + + + **Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.7/faq/technical-items.md b/versioned_docs/version-2.7/faq/technical-items.md index c860dac95ccb..0aed55b5d0db 100644 --- a/versioned_docs/version-2.7/faq/technical-items.md +++ b/versioned_docs/version-2.7/faq/technical-items.md @@ -2,6 +2,10 @@ title: Technical --- + + + + ### How can I reset the administrator password? Docker Install: diff --git a/versioned_docs/version-2.7/faq/telemetry.md b/versioned_docs/version-2.7/faq/telemetry.md index f0866ac2b559..a510347e5564 100644 --- a/versioned_docs/version-2.7/faq/telemetry.md +++ b/versioned_docs/version-2.7/faq/telemetry.md @@ -2,6 +2,10 @@ title: Telemetry --- + + + + ### What is Telemetry? Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. From ae0281478a2beb54b826f97dbc571f1f3666fdb2 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 8 Sep 2023 17:23:37 -0400 Subject: [PATCH 37/54] #420 Canonical links for smaller directories (#837) * canonicized cluster-provisioning/rke-clusters/options/options.md * canonicized security scans * canonicized contributing to rancher * canonicized 'what is rancher?' 
--- docs/cluster-provisioning/rke-clusters/options/options.md | 4 ++++ docs/contribute-to-rancher.md | 4 ++++ docs/rancher-manager.md | 5 +++++ docs/security/security-scan/security-scan.md | 4 ++++ versioned_docs/version-2.0-2.4/contribute-to-rancher.md | 4 ++++ .../version-2.0-2.4/security/security-scan/security-scan.md | 6 +++++- versioned_docs/version-2.5/contribute-to-rancher.md | 4 ++++ .../rke1-cluster-configuration.md | 4 ++++ .../version-2.5/security/security-scan/security-scan.md | 4 ++++ .../cluster-provisioning/rke-clusters/options/options.md | 4 ++++ versioned_docs/version-2.6/contribute-to-rancher.md | 4 ++++ versioned_docs/version-2.6/rancher-manager.md | 5 +++++ .../version-2.6/security/security-scan/security-scan.md | 4 ++++ .../cluster-provisioning/rke-clusters/options/options.md | 4 ++++ versioned_docs/version-2.7/contribute-to-rancher.md | 4 ++++ versioned_docs/version-2.7/rancher-manager.md | 5 +++++ .../version-2.7/security/security-scan/security-scan.md | 4 ++++ 17 files changed, 72 insertions(+), 1 deletion(-) diff --git a/docs/cluster-provisioning/rke-clusters/options/options.md b/docs/cluster-provisioning/rke-clusters/options/options.md index 4cb7e51109e2..da8df215bd2e 100644 --- a/docs/cluster-provisioning/rke-clusters/options/options.md +++ b/docs/cluster-provisioning/rke-clusters/options/options.md @@ -2,4 +2,8 @@ title: RKE Cluster Configuration --- + + + + This page has moved [here.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) diff --git a/docs/contribute-to-rancher.md b/docs/contribute-to-rancher.md index 45874694b861..b894ec64e665 100644 --- a/docs/contribute-to-rancher.md +++ b/docs/contribute-to-rancher.md @@ -2,6 +2,10 @@ title: Contributing to Rancher --- + + + + Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: diff --git a/docs/rancher-manager.md b/docs/rancher-manager.md index 0abaef71dea0..452b4f33ae5e 100644 --- a/docs/rancher-manager.md +++ b/docs/rancher-manager.md @@ -4,6 +4,11 @@ title: "What is Rancher?" sidebar_label: What is Rancher? description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." --- + + + + + Rancher is a Kubernetes management tool to deploy and run clusters anywhere and on any provider. Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. 
diff --git a/docs/security/security-scan/security-scan.md b/docs/security/security-scan/security-scan.md index 7c9c5a3ce486..061d0af8edd3 100644 --- a/docs/security/security-scan/security-scan.md +++ b/docs/security/security-scan/security-scan.md @@ -2,4 +2,8 @@ title: Security Scans --- + + + + The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/versioned_docs/version-2.0-2.4/contribute-to-rancher.md b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md index 45874694b861..b894ec64e665 100644 --- a/versioned_docs/version-2.0-2.4/contribute-to-rancher.md +++ b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md @@ -2,6 +2,10 @@ title: Contributing to Rancher --- + + + + Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: diff --git a/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md index f9af3528bb5f..061d0af8edd3 100644 --- a/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md +++ b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md @@ -2,4 +2,8 @@ title: Security Scans --- -The documentation about CIS security scans has moved [here.](cis-scans) + + + + +The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/versioned_docs/version-2.5/contribute-to-rancher.md b/versioned_docs/version-2.5/contribute-to-rancher.md index 45874694b861..b894ec64e665 100644 --- a/versioned_docs/version-2.5/contribute-to-rancher.md +++ b/versioned_docs/version-2.5/contribute-to-rancher.md @@ -2,6 +2,10 @@ title: Contributing to Rancher --- + + + + Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md index 6e69a75231ee..a6cc8ddb8fa0 100644 --- a/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -2,6 +2,10 @@ title: RKE Cluster Configuration --- + + + + In [clusters launched by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can edit any of the remaining options that follow. 
- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) diff --git a/versioned_docs/version-2.5/security/security-scan/security-scan.md b/versioned_docs/version-2.5/security/security-scan/security-scan.md index 7c9c5a3ce486..061d0af8edd3 100644 --- a/versioned_docs/version-2.5/security/security-scan/security-scan.md +++ b/versioned_docs/version-2.5/security/security-scan/security-scan.md @@ -2,4 +2,8 @@ title: Security Scans --- + + + + The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md index 4cb7e51109e2..da8df215bd2e 100644 --- a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md @@ -2,4 +2,8 @@ title: RKE Cluster Configuration --- + + + + This page has moved [here.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) diff --git a/versioned_docs/version-2.6/contribute-to-rancher.md b/versioned_docs/version-2.6/contribute-to-rancher.md index 45874694b861..b894ec64e665 100644 --- a/versioned_docs/version-2.6/contribute-to-rancher.md +++ b/versioned_docs/version-2.6/contribute-to-rancher.md @@ -2,6 +2,10 @@ title: Contributing to Rancher --- + + + + Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: diff --git a/versioned_docs/version-2.6/rancher-manager.md b/versioned_docs/version-2.6/rancher-manager.md index 0abaef71dea0..452b4f33ae5e 100644 --- a/versioned_docs/version-2.6/rancher-manager.md +++ b/versioned_docs/version-2.6/rancher-manager.md @@ -4,6 +4,11 @@ title: "What is Rancher?" sidebar_label: What is Rancher? description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." --- + + + + + Rancher is a Kubernetes management tool to deploy and run clusters anywhere and on any provider. Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. 
diff --git a/versioned_docs/version-2.6/security/security-scan/security-scan.md b/versioned_docs/version-2.6/security/security-scan/security-scan.md index 7c9c5a3ce486..061d0af8edd3 100644 --- a/versioned_docs/version-2.6/security/security-scan/security-scan.md +++ b/versioned_docs/version-2.6/security/security-scan/security-scan.md @@ -2,4 +2,8 @@ title: Security Scans --- + + + + The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/versioned_docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md b/versioned_docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md index 4cb7e51109e2..da8df215bd2e 100644 --- a/versioned_docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md +++ b/versioned_docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md @@ -2,4 +2,8 @@ title: RKE Cluster Configuration --- + + + + This page has moved [here.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) diff --git a/versioned_docs/version-2.7/contribute-to-rancher.md b/versioned_docs/version-2.7/contribute-to-rancher.md index 45874694b861..b894ec64e665 100644 --- a/versioned_docs/version-2.7/contribute-to-rancher.md +++ b/versioned_docs/version-2.7/contribute-to-rancher.md @@ -2,6 +2,10 @@ title: Contributing to Rancher --- + + + + Learn about the repositories used for Rancher and Rancher docs, how to build Rancher repositories, and what information to include when you file an issue. For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: diff --git a/versioned_docs/version-2.7/rancher-manager.md b/versioned_docs/version-2.7/rancher-manager.md index 0abaef71dea0..ad5eeb33b871 100644 --- a/versioned_docs/version-2.7/rancher-manager.md +++ b/versioned_docs/version-2.7/rancher-manager.md @@ -4,6 +4,11 @@ title: "What is Rancher?" sidebar_label: What is Rancher? description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." --- + + + + + Rancher is a Kubernetes management tool to deploy and run clusters anywhere and on any provider. Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. 
diff --git a/versioned_docs/version-2.7/security/security-scan/security-scan.md b/versioned_docs/version-2.7/security/security-scan/security-scan.md index 7c9c5a3ce486..061d0af8edd3 100644 --- a/versioned_docs/version-2.7/security/security-scan/security-scan.md +++ b/versioned_docs/version-2.7/security/security-scan/security-scan.md @@ -2,4 +2,8 @@ title: Security Scans --- + + + + The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) From 946c22780a7ff26e60ef29adbb614d8ad5419412 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Mon, 11 Sep 2023 11:10:12 -0400 Subject: [PATCH 38/54] sync v2.7 page with #797 Update k3s-hardening-guide based on CIS-1.24 and CIS-1.7 (#838) --- .../k3s-hardening-guide.md | 2 +- .../k3s-hardening-guide.md | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/pages-for-subheaders/k3s-hardening-guide.md b/docs/pages-for-subheaders/k3s-hardening-guide.md index ee564ada2b4c..285577fbca5c 100644 --- a/docs/pages-for-subheaders/k3s-hardening-guide.md +++ b/docs/pages-for-subheaders/k3s-hardening-guide.md @@ -21,7 +21,7 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -- In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter isn't required anymore, and was removed by CIS. +In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md index 1fdcce7964bf..e44593ec7903 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md @@ -16,10 +16,12 @@ This hardening guide is intended to be used for K3s clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. +In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened K3s cluster against the official CIS benchmark, refer to the K3s self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -37,6 +39,14 @@ The first section (1.1) of the CIS Benchmark primarily focuses on pod manifest ### Ensure `protect-kernel-defaults` is set + + + +The `protect-kernel-defaults` is no longer required since CIS benchmark 1.7. + + + + This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. 
The `protect-kernel-defaults` flag can be set in the cluster configuration in Rancher. @@ -49,6 +59,9 @@ spec: protect-kernel-defaults: true ``` + + + ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: @@ -721,7 +734,6 @@ spec: - config: kubelet-arg: - make-iptables-util-chains=true # CIS 4.2.7 - protect-kernel-defaults: true # CIS 4.2.6 ```
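For context on the "Set kernel parameters" step touched in the hardening guide above, the following is a minimal sketch of how those host-level settings are typically applied on each node before the Kubernetes service is started. The four values shown match the kubelet's built-in defaults (the ones `--protect-kernel-defaults` used to guard); confirm the exact list against the hardening guide for your benchmark version before applying it:

```bash
# Write the kubelet-friendly kernel parameters on every node
# (server and agent) before starting the K3s service.
cat <<'EOF' | sudo tee /etc/sysctl.d/90-kubelet.conf
vm.panic_on_oom=0
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
EOF

# Load the new settings without a reboot and verify them.
sudo sysctl -p /etc/sysctl.d/90-kubelet.conf
sysctl vm.panic_on_oom vm.overcommit_memory kernel.panic kernel.panic_on_oops
```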
From cbe7fe5d69dcff15fd99e34e3a6f36001a51591c Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 11 Sep 2023 11:31:45 -0400 Subject: [PATCH 39/54] Remove protect-kernel-defaults --- docs/pages-for-subheaders/rke1-hardening-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/pages-for-subheaders/rke1-hardening-guide.md b/docs/pages-for-subheaders/rke1-hardening-guide.md index 783d4c85a3e5..19bc19bc64c9 100644 --- a/docs/pages-for-subheaders/rke1-hardening-guide.md +++ b/docs/pages-for-subheaders/rke1-hardening-guide.md @@ -295,6 +295,7 @@ services: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: true generate_serving_certificate: true addons: | # Upstream Kubernetes restricted PSP policy @@ -441,7 +442,6 @@ rancher_kubernetes_engine_config: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: true tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 generate_serving_certificate: true scheduler: From 8ea5033ca60b68a6ae86630277bbdd87e3746df7 Mon Sep 17 00:00:00 2001 From: Michael Bolot Date: Thu, 31 Aug 2023 12:59:52 -0500 Subject: [PATCH 40/54] Updating webhook docs Updates the webhook docs to point toward the docs for individual checks, and adds docs for common issues/version specific issues --- docs/reference-guides/rancher-webhook.md | 72 ++++++++++++++++++- .../reference-guides/rancher-webhook.md | 68 +++++++++++++++++- 2 files changed, 137 insertions(+), 3 deletions(-) diff --git a/docs/reference-guides/rancher-webhook.md b/docs/reference-guides/rancher-webhook.md index 41bd42226b33..6f993753254f 100644 --- a/docs/reference-guides/rancher-webhook.md +++ b/docs/reference-guides/rancher-webhook.md @@ -11,12 +11,47 @@ Rancher-Webhook is an essential component of Rancher that works in conjunction w It integrates with Kubernetes' extensible admission controllers, as described in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which allows Rancher-Webhook to inspect specific requests sent to the Kubernetes API server, and add custom, Rancher-specific validation and mutations to the requests that are specific to Rancher. Rancher-Webhook manages the resources to be validated using the `rancher.cattle.io` `ValidatingWebhookConfiguration` and the `rancher.cattle.io` `MutatingWebhookConfiguration`, and will override any manual edits. Rancher deploys Rancher-Webhook as a separate deployment and service in both local and downstream clusters. Rancher manages Rancher-Webhook using Helm. It's important to note that Rancher may override modifications made by users to the Helm release. -## Why do we need it? +Each Rancher version is designed to be compatible with a single version of the webhook. The compatible versions are provided below for convenience. + +**Note:** Rancher manages deployment and upgrade of the webhook. Under most circumstances, no user intervention should be needed to ensure that the webhook version is compatible with the version of Rancher that you are running. 
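If you need to confirm which webhook version a particular cluster is actually running before consulting the compatibility table that follows, one option (a sketch, assuming the default deployment name and namespace) is to read the image tag off the `rancher-webhook` deployment:

```bash
# Print the image used by the rancher-webhook deployment; the image tag
# corresponds to the webhook version listed in the table below.
kubectl -n cattle-system get deployment rancher-webhook \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'

# Illustrative output: rancher/rancher-webhook:v0.3.5
```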
+ +| Rancher Version | Webhook Version | +|-----------------|:---------------:| +| v2.7.0 | v0.3.0 | +| v2.7.1 | v0.3.0 | +| v2.7.2 | v0.3.2 | +| v2.7.3 | v0.3.3 | +| v2.7.4 | v0.3.4 | +| v2.7.5 | v0.3.5 | +| v2.7.6 | v0.3.5 | + + +## Why Do We Need It? Rancher-Webhook is crucial for Rancher to protect clusters against malicious attacks and enable various features. Rancher relies on the Rancher-Webhook as an integral part of its functionality. Without the webhook, Rancher would not be a complete product. It provides essential protection for Rancher-managed clusters, preventing security vulnerabilities and ensuring the consistency and stability of the cluster. +## What Resources Does the Webhook Validate? + +An in-progress list of the resources that the webhook validates can be found in the [webhook's repo](https://github.com/rancher/webhook/blob/release/v0.4/docs.md). These docs are organized by group/version and resource (top-level header is group/version, next level header is resource). Checks specific to one version can be found by viewing the `docs.md` file associated with a particular tag (note that webhook versions prior to `v0.3.6` won't have this file). + +## Bypassing the Webhook + +Sometimes, it may be necessary to bypass Rancher's webhook validation to perform emergency restore operations, or fix other critical issues. The bypass operation is exhaustive, meaning that no webhook validations or mutations will apply when this is used. It is not possible to bypass some mutations or validations and have others still apply - they are either all bypassed, or all active. + +:::danger + +Rancher's webhook provides critical security protections. Bypassing the webhook should only be done by administrators in specific scenarios, after all other options have been exhausted. In addition, permission to bypass the webhook should be carefully controlled, and never given to users who are not admins. + +::: + +To bypass the webhook, impersonate both the `rancher-webhook-sudo` service account and the `system:masters` group (both are required): + +```bash +kubectl create -f example.yaml --as=system:serviceaccount:cattle-system:rancher-webhook-sudo --as-group=system:masters +``` + ## Common Issues ### EKS Cluster with Calico CNI @@ -26,10 +61,12 @@ One workaround for this issue [documented by Calico](https://docs.tigera.io/cali ``` bash helm repo add rancher-charts https://charts.rancher.io -helm upgrade --reuse-values rancher-webhook rancher-chart/rancher-webhook -n cattle-system --set global.hostNetwork=true +helm upgrade --reuse-values rancher-webhook rancher-charts/rancher-webhook -n cattle-system --set global.hostNetwork=true ``` **Note:** This temporary workaround may violate an environment's security policy. This workaround also requires that port 9443 is unused on the host network. +**Note:** Helm, by default, uses a type that some webhook versions validate (secrets) to store information. In these cases, it's recommended to first directly update the deployment with the hostNetwork=true value using kubectl, and then perform the helm commands listed above to avoid drift between the helm configuration and the actual state in the cluster. + ### Private GKE Cluster When using a private GKE cluster, errors may occur that prevent the Kubernetes API server from communicating with the webhook. 
The following error message may appear: @@ -38,3 +75,34 @@ When using a private GKE cluster, errors may occur that prevent the Kubernetes A Internal error occurred: failed calling webhook "rancher.cattle.io.namespaces.create-non-kubesystem": failed to call webhook: Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation/namespaces?timeout=10s": context deadline exceeded ``` This issue occurs because firewall rules restrict communication between the API server and the private cluster. To resolve this communication problem, users must add firewall rules to allow the GKE control plane to communicate with the Rancher-Webhook on port 9443. Please refer to the [GKE documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) for detailed information and steps on updating the firewall rules. + +### Application Fails to Deploy Due to rancher-webhook Blocking Access + +The webhook provides extra validations on [namespaces](https://github.com/rancher/webhook/blob/release/v0.4/docs.md#psa-label-validation). One of these validations ensures that users can only update PSA relevant labels if they have the proper permissions (`updatepsa` for `projects` in `management.cattle.io`). This can result in specific operators, such as Tigera or Trident, failing when they attempt to deploy namespaces with PSA labels. There are several ways to resolve this issue: + +- Configure the application to create a namespace with no PSA labels. If users wish to apply a PSA to these namespaces, they can add them to a project with the desired PSA after configuration. See the [docs on PSS and PSA resources](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards) for instructions on how. + - This is the preferred option, though not all applications can be configured in this fashion. +- Manually grant the operator permissions to manage PSAs for namespaces. + - This option will introduce security risks, since the operator will now be able to set the PSA for the namespaces it has access to. This could allow the operator to deploy a privileged pod, or effect cluster takeover through other means. +- A user account with the proper permissions can pre-create the namespace with the appropriate configuration. + - This option depends on the ability of the application to handle existing resources. + +## Issues on Specific Versions + +**Note:** The following is an incomplete list of high-severity issues affecting specific Rancher/webhook versions. In most cases, these issues can be resolved by upgrading to a more recent Rancher version. + +### Incompatible Webhook Version on Rollback + +**Note:** This affects rolling back to Rancher v2.7.5 or earlier. + +If you roll back to Rancher v2.7.5 or earlier, you may see webhook versions that are too recent to be compatible with downstream clusters running pre-v2.7.5 version of Rancher. This may cause various incompatibility issues. For example, project members may be unable to create namespaces. In addition, when you roll back to versions before the webhook was installed in downstream clusters, the webhook may remain installed, which can result in similar incompatibility issues. + +To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. 
This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version. + +### Project Members Can't Create Namespaces + +**Note:** This affects Rancher versions `v2.7.2 - v2.7.4` + +Project users who aren't owners may not be able to create namespaces in projects. This issue is caused by Rancher automatically upgrading the webhook to a version compatible with a more recent version of Rancher than the one currently installed. + +To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version. diff --git a/versioned_docs/version-2.7/reference-guides/rancher-webhook.md b/versioned_docs/version-2.7/reference-guides/rancher-webhook.md index 41bd42226b33..1d2a17275b47 100644 --- a/versioned_docs/version-2.7/reference-guides/rancher-webhook.md +++ b/versioned_docs/version-2.7/reference-guides/rancher-webhook.md @@ -11,12 +11,47 @@ Rancher-Webhook is an essential component of Rancher that works in conjunction w It integrates with Kubernetes' extensible admission controllers, as described in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which allows Rancher-Webhook to inspect specific requests sent to the Kubernetes API server, and add custom, Rancher-specific validation and mutations to the requests that are specific to Rancher. Rancher-Webhook manages the resources to be validated using the `rancher.cattle.io` `ValidatingWebhookConfiguration` and the `rancher.cattle.io` `MutatingWebhookConfiguration`, and will override any manual edits. Rancher deploys Rancher-Webhook as a separate deployment and service in both local and downstream clusters. Rancher manages Rancher-Webhook using Helm. It's important to note that Rancher may override modifications made by users to the Helm release. -## Why do we need it? +Each Rancher version is designed to be compatible with a single version of the webhook. The compatible versions are provided below for convenience. + +**Note:** Rancher manages deployment and upgrade of the webhook. Under most circumstances, no user intervention should be needed to ensure that the webhook version is compatible with the version of Rancher that you are running. + +| Rancher Version | Webhook Version | +|-----------------|:---------------:| +| v2.7.0 | v0.3.0 | +| v2.7.1 | v0.3.0 | +| v2.7.2 | v0.3.2 | +| v2.7.3 | v0.3.3 | +| v2.7.4 | v0.3.4 | +| v2.7.5 | v0.3.5 | +| v2.7.6 | v0.3.5 | + + +## Why Do We Need It? Rancher-Webhook is crucial for Rancher to protect clusters against malicious attacks and enable various features. Rancher relies on the Rancher-Webhook as an integral part of its functionality. Without the webhook, Rancher would not be a complete product. It provides essential protection for Rancher-managed clusters, preventing security vulnerabilities and ensuring the consistency and stability of the cluster. +## What Resources Does the Webhook Validate? + +An in-progress list of the resources that the webhook validates can be found in the [webhook's repo](https://github.com/rancher/webhook/blob/release/v0.4/docs.md). 
Checks specific to one version can be found by viewing the `docs.md` file associated with a particular tag (note that webhook versions prior to `v0.3.6` won't have this file). + +## Bypassing the Webhook + +Sometimes, it may be necessary to bypass Rancher's webhook validation to perform emergency restore operations, or fix other critical issues. The bypass operation is exhaustive, meaning that no webhook checks will apply when this is used. It is not possible to bypass some checks and have others still apply - they are either all bypassed, or all active. + +:::danger + +Rancher's webhook provides critical security protections. Bypassing the webhook should only be done by administrators in specific scenarios, after all other options have been exhausted. In addition, permission to bypass the webhook should be carefully controlled, and never given to users who are not admins. + +::: + +To bypass the webhook, impersonate both the `rancher-webhook-sudo` service account and the `system:masters` group (both are required): + +```bash +kubectl create -f example.yaml --as=system:serviceaccount:cattle-system:rancher-webhook-sudo --as-group=system:masters +``` + ## Common Issues ### EKS Cluster with Calico CNI @@ -38,3 +73,34 @@ When using a private GKE cluster, errors may occur that prevent the Kubernetes A Internal error occurred: failed calling webhook "rancher.cattle.io.namespaces.create-non-kubesystem": failed to call webhook: Post "https://rancher-webhook.cattle-system.svc:443/v1/webhook/validation/namespaces?timeout=10s": context deadline exceeded ``` This issue occurs because firewall rules restrict communication between the API server and the private cluster. To resolve this communication problem, users must add firewall rules to allow the GKE control plane to communicate with the Rancher-Webhook on port 9443. Please refer to the [GKE documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) for detailed information and steps on updating the firewall rules. + +### Application Fails to Deploy Due to rancher-webhook Blocking Access + +The webhook provides extra validations on [namespaces](https://github.com/rancher/webhook/blob/release/v0.4/docs.md#psa-label-validation). One of these validations ensures that users can only update PSA relevant labels if they have the proper permissions (`updatepsa` for `projects` in `management.cattle.io`). This can result in specific operators, such as Tigera or Trident, failing when they attempt to deploy namespaces with PSA labels. There are several ways to resolve this issue: + +- Configure the application to create a namespace with no PSA labels. If users wish to apply a PSA to these namespaces, they can add them to a project with the desired PSA after configuration. See the [docs on PSS and PSA resources](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards) for instructions on how. + - This is the preferred option, though not all applications can be configured in this fashion. +- Manually grant the operator permissions to manage PSAs for namespaces. + - This option will introduce security risks, since the operator will now be able to set the PSA for the namespaces it has access to. This could allow the operator to deploy a privileged pod, or effect cluster takeover through other means. +- A user account with the proper permissions can pre-create the namespace with the appropriate configuration. 
+ - This option depends on the ability of the application to handle existing resources. + +## Issues on Specific Versions + +**Note:** The following is an incomplete list of high-severity issues affecting specific Rancher/webhook versions. In most cases, these issues can be resolved by upgrading to a more recent Rancher version. + +### Incompatible Webhook Version on Rollback + +**Note:** This affects rolling back to Rancher v2.7.5 or earlier. + +If you roll back to Rancher v2.7.5 or earlier, you may see webhook versions that are too recent to be compatible with downstream clusters running pre-v2.7.5 version of Rancher. This may cause various incompatibility issues. For example, project members may be unable to create namespaces. In addition, when you roll back to versions before the webhook was installed in downstream clusters, the webhook may remain installed, which can result in similar incompatibility issues. + +To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version. + +### Project Members Can't Create Namespaces + +**Note:** This affects Rancher versions `v2.7.2 - v2.7.4` + +Project users who aren't owners may not be able to create namespaces in projects. This issue is caused by Rancher automatically upgrading the webhook to a version compatible with a more recent version of Rancher than the one currently installed. + +To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version. From 7ae3799d6a595bb50350b45b0e3518ac38c4a668 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 11 Sep 2023 17:52:11 -0400 Subject: [PATCH 41/54] rke2 protect-kernel-defaults and checks --- .../rke2-hardening-guide.md | 51 +++++++++++-------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/docs/pages-for-subheaders/rke2-hardening-guide.md b/docs/pages-for-subheaders/rke2-hardening-guide.md index 962462c7f3fd..9bd8f95e4c0b 100644 --- a/docs/pages-for-subheaders/rke2-hardening-guide.md +++ b/docs/pages-for-subheaders/rke2-hardening-guide.md @@ -12,10 +12,13 @@ This hardening guide is intended to be used for RKE2 clusters and is associated | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. Due to that, the CIS checks 5.2.2, 5.2.3, 5.2.5, 5.2.6, 5.2.7 and 5.2.8 might fail. +- In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.1`, `1.1.3`, `1.1.5`, `1.1.7`, `1.1.13`, `1.1.15`, `1.1.17`, `4.1.3`, `4.1.5` and `4.1.9`. 
+ - In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter is not required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened RKE2 cluster against the official CIS benchmark, refer to the RKE2 self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -29,6 +32,31 @@ RKE2 passes a number of the Kubernetes CIS controls without modification, as it There are two areas of host-level requirements: kernel parameters and etcd process/directory configuration. These are outlined in this section. +### Ensure `protect-kernel-defaults` is set + + + + +The `protect-kernel-defaults` is no longer required since CIS benchmark 1.7. + + + + +This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. + +The `protect-kernel-defaults` flag can be set in the cluster configuration in Rancher. + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + protect-kernel-defaults: true +``` + + + + ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: @@ -64,22 +92,6 @@ sudo useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U The runtime requirements to pass the CIS Benchmark are centered around pod security, network policies and kernel parameters. Most of this is automatically handled by RKE2 when using a valid `cis-1.xx` profile, but some additional operator intervention is required. These are outlined in this section. -### Ensure `protect-kernel-defaults` is set - -This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. - -Both `protect-kernel-defaults` and `profile` flags can be set in the RKE2 template configuration file. -When the `profile` flag is set, RKE2 will set the flag to `true` if it is unset. - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - profile: # use cis-1.23 or cis-1.6 - protect-kernel-defaults: true -``` - ### PodSecurity RKE2 always runs with some amount of pod security. @@ -89,7 +101,7 @@ RKE2 always runs with some amount of pod security. On v1.25 and newer, [Pod Security Admissions (PSAs)](https://kubernetes.io/docs/concepts/security/pod-security-admission/) are used for pod security. -Below is the minimum necessary configuration needed for hardening RKE2 to pass CIS v1.23 hardened profile `rke2-cis-1.23-hardened` available in Rancher. +Below is the minimum necessary configuration needed for hardening RKE2 to pass CIS v1.23 hardened profile `rke2-cis-1.7-hardened` available in Rancher. ```yaml spec: @@ -230,7 +242,6 @@ spec: machineSelectorConfig: - config: profile: cis-1.23 - protect-kernel-defaults: true ```
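As a rough follow-up to the PodSecurity notes above, one way to smoke-test that the hardened profile's pod security enforcement is active on a v1.25+ RKE2 cluster is to try scheduling a privileged pod in a throwaway namespace and confirm that admission rejects it. This is only an illustrative check, under the assumption that the namespace is not on the profile's exemption list (the namespace and pod names here are arbitrary):

```bash
# Create a scratch namespace that is not exempt from the restricted policy.
kubectl create namespace psa-smoke-test

# Attempt to run a privileged pod; on a hardened cluster the request should
# be rejected with a "violates PodSecurity" admission error.
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: privileged-smoke-test
  namespace: psa-smoke-test
spec:
  containers:
  - name: test
    image: busybox
    command: ["sleep", "3600"]
    securityContext:
      privileged: true
EOF

# Clean up afterwards.
kubectl delete namespace psa-smoke-test
```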
From 35067183ee395f3a883c8140a0a2fb5766ac6770 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 11 Sep 2023 18:18:07 -0400 Subject: [PATCH 42/54] Apply grammar recommendations: - Remove latest to cis-1.7 version - Improve wording Co-authored-by: Marty Hernandez Avedon --- docs/pages-for-subheaders/rke1-hardening-guide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/pages-for-subheaders/rke1-hardening-guide.md b/docs/pages-for-subheaders/rke1-hardening-guide.md index 19bc19bc64c9..031bf8f448f8 100644 --- a/docs/pages-for-subheaders/rke1-hardening-guide.md +++ b/docs/pages-for-subheaders/rke1-hardening-guide.md @@ -17,8 +17,8 @@ This hardening guide is intended to be used for RKE clusters and is associated w | Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -- Since Benchmark v1.24, check id `4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)` might fail, as /etc/kubernetes/ssl/kube-ca.pem is provisioned in 644 by default. -- Since Benchmark v1.7 (latest), `--protect-kernel-defaults` (check id 4.2.6) parameter is not required anymore, and was replaced. +- In Benchmark v1.24 and later, check id `4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)` might fail, as `/etc/kubernetes/ssl/kube-ca.pem` is set to 644 by default. +- In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened RKE cluster against the official CIS benchmark, refer to the RKE self-assessment guides for specific Kubernetes and CIS benchmark versions. From acedf5470d3ddaa76749fe046b6201e41021f538 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 12 Sep 2023 09:56:11 -0400 Subject: [PATCH 43/54] #793 RKE2 cluster templates (#822) * #793 RKE2 cluster templates #793 * syncing 2.6 after testing confirmed --- .../manage-clusters/manage-cluster-templates.md | 4 ++-- .../manage-clusters/manage-cluster-templates.md | 4 ++-- .../manage-clusters/manage-cluster-templates.md | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md b/docs/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md index 65571412fd5a..0d4b15bc48c5 100644 --- a/docs/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md +++ b/docs/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md @@ -32,8 +32,8 @@ In this section, you'll learn how to add the cluster template to the `local` clu :::note Prerequisites: -- You will need permission to install Helm charts on the `local` Kubernetes cluster that Rancher is installed on. -- In order for the chart to appear in the form for creating new clusters, the chart must have the annotation `catalog.cattle.io/type: cluster-template`. +- You will need permission to install Helm charts on the `local` Rancher cluster. +- To make the chart viewable in the cluster creation form, both the chart and the index.yaml file must have the annotation, `catalog.cattle.io/type: cluster-template`. 
::: diff --git a/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md b/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md index 538092b37819..6396a81e4348 100644 --- a/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md +++ b/versioned_docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md @@ -32,8 +32,8 @@ In this section, you'll learn how to add the cluster template to the `local` clu :::note Prerequisites: -- You will need permission to install Helm charts on the `local` Kubernetes cluster that Rancher is installed on. -- In order for the chart to appear in the form for creating new clusters, the chart must have the annotation `catalog.cattle.io/type: cluster-template`. +- You will need permission to install Helm charts on the `local` Rancher cluster. +- To make the chart viewable in the cluster creation form, both the chart and the index.yaml file must have the annotation, `catalog.cattle.io/type: cluster-template`. ::: diff --git a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md index 65571412fd5a..0d4b15bc48c5 100644 --- a/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md +++ b/versioned_docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md @@ -32,8 +32,8 @@ In this section, you'll learn how to add the cluster template to the `local` clu :::note Prerequisites: -- You will need permission to install Helm charts on the `local` Kubernetes cluster that Rancher is installed on. -- In order for the chart to appear in the form for creating new clusters, the chart must have the annotation `catalog.cattle.io/type: cluster-template`. +- You will need permission to install Helm charts on the `local` Rancher cluster. +- To make the chart viewable in the cluster creation form, both the chart and the index.yaml file must have the annotation, `catalog.cattle.io/type: cluster-template`. ::: From 58d3cc3f1785f2194a60ddc9f08f06217436737f Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 13 Sep 2023 16:23:17 -0400 Subject: [PATCH 44/54] syncing 2.7 rke1 and rke2 hardening guides w /docs (#840) --- .../rke1-hardening-guide.md | 9 ++-- .../rke2-hardening-guide.md | 51 +++++++++++-------- 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md index 5c5fad27a162..4d6f97e18c77 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rke1-hardening-guide.md @@ -16,10 +16,13 @@ This hardening guide is intended to be used for RKE clusters and is associated w | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. 
At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. Due to that, the CIS checks 5.2.3, 5.2.4, 5.2.5 and 5.2.6 might fail. +- In Benchmark v1.24 and later, check id `4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)` might fail, as `/etc/kubernetes/ssl/kube-ca.pem` is set to 644 by default. +- In Benchmark v1.7, the `--protect-kernel-defaults` (`4.2.6`) parameter isn't required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened RKE cluster against the official CIS benchmark, refer to the RKE self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -247,7 +250,6 @@ services: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" generate_serving_certificate: true addons: | apiVersion: networking.k8s.io/v1 @@ -444,7 +446,6 @@ rancher_kubernetes_engine_config: kubelet: extra_args: feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: true tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 generate_serving_certificate: true scheduler: diff --git a/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md b/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md index 5f3c8c7697f4..378050c8e45f 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/rke2-hardening-guide.md @@ -16,10 +16,13 @@ This hardening guide is intended to be used for RKE2 clusters and is associated | Rancher Version | CIS Benchmark Version | Kubernetes Version | |-----------------|-----------------------|------------------------------| -| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 up to v1.25 | +| Rancher v2.7 | Benchmark v1.23 | Kubernetes v1.23 | +| Rancher v2.7 | Benchmark v1.24 | Kubernetes v1.24 | +| Rancher v2.7 | Benchmark v1.7 | Kubernetes v1.25 up to v1.26 | :::note -At the time of writing, the upstream CIS Kubernetes v1.25 benchmark is not yet available in Rancher. At this time Rancher is using the CIS v1.23 benchmark when scanning Kubernetes v1.25 clusters. Due to that, the CIS checks 5.2.2, 5.2.3, 5.2.5, 5.2.6, 5.2.7 and 5.2.8 might fail. +- In Benchmark v1.24 and later, some check ids might fail due to new file permission requirements (600 instead of 644). Impacted check ids: `1.1.1`, `1.1.3`, `1.1.5`, `1.1.7`, `1.1.13`, `1.1.15`, `1.1.17`, `4.1.3`, `4.1.5` and `4.1.9`. + - In Benchmark v1.7, the `--protect-kernel-defaults` (4.2.6) parameter is not required anymore, and was removed by CIS. ::: For more details on how to evaluate a hardened RKE2 cluster against the official CIS benchmark, refer to the RKE2 self-assessment guides for specific Kubernetes and CIS benchmark versions. @@ -33,6 +36,31 @@ RKE2 passes a number of the Kubernetes CIS controls without modification, as it There are two areas of host-level requirements: kernel parameters and etcd process/directory configuration. These are outlined in this section. +### Ensure `protect-kernel-defaults` is set + + + + +The `protect-kernel-defaults` is no longer required since CIS benchmark 1.7. 
+ + + + +This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. + +The `protect-kernel-defaults` flag can be set in the cluster configuration in Rancher. + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + protect-kernel-defaults: true +``` + + + + ### Set kernel parameters The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: @@ -68,22 +96,6 @@ sudo useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U The runtime requirements to pass the CIS Benchmark are centered around pod security, network policies and kernel parameters. Most of this is automatically handled by RKE2 when using a valid `cis-1.xx` profile, but some additional operator intervention is required. These are outlined in this section. -### Ensure `protect-kernel-defaults` is set - -This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. - -Both `protect-kernel-defaults` and `profile` flags can be set in the RKE2 template configuration file. -When the `profile` flag is set, RKE2 will set the flag to `true` if it is unset. - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - profile: # use cis-1.23 or cis-1.6 - protect-kernel-defaults: true -``` - ### PodSecurity RKE2 always runs with some amount of pod security. @@ -93,7 +105,7 @@ RKE2 always runs with some amount of pod security. On v1.25 and newer, [Pod Security Admissions (PSAs)](https://kubernetes.io/docs/concepts/security/pod-security-admission/) are used for pod security. -Below is the minimum necessary configuration needed for hardening RKE2 to pass CIS v1.23 hardened profile `rke2-cis-1.23-hardened` available in Rancher. +Below is the minimum necessary configuration needed for hardening RKE2 to pass CIS v1.23 hardened profile `rke2-cis-1.7-hardened` available in Rancher. ```yaml spec: @@ -234,7 +246,6 @@ spec: machineSelectorConfig: - config: profile: cis-1.23 - protect-kernel-defaults: true ```
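The removal of `protect-kernel-defaults` above applies to Benchmark v1.7 only; check 4.2.6 still expects the flag when a cluster is scanned against Benchmark v1.23 or v1.24. A minimal sketch of the RKE1 kubelet section for those older benchmark versions, mirroring the lines removed in this patch:

```yaml
services:
  kubelet:
    extra_args:
      feature-gates: RotateKubeletServerCertificate=true
      # still expected by CIS check 4.2.6 in Benchmark v1.23 and v1.24
      protect-kernel-defaults: "true"
    generate_serving_certificate: true
```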
From ef5ab63ddd4e9924c2b67a81e9a31f57a375001b Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 13 Sep 2023 16:29:07 -0400 Subject: [PATCH 45/54] #686 clarify when values should be raw or manually base64 encoded (#761) * 686 clarify when values should be raw or manually base64 encoded * rm'd section as duplicate of https://ranchermanager.docs.rancher.com/reference-guides/backup-restore-configuration/backup-configuration#example-credentialsecret * UI instructions for finding Backup: Create + link to how-to on secret creation * sync language * Apply suggestions from code review Co-authored-by: Billy Tat --------- Co-authored-by: Billy Tat --- .../backup-configuration.md | 9 ++++++--- .../backup-restore-configuration/examples.md | 13 ------------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/docs/reference-guides/backup-restore-configuration/backup-configuration.md b/docs/reference-guides/backup-restore-configuration/backup-configuration.md index 37ae6f2ab457..a9d1eff259c8 100644 --- a/docs/reference-guides/backup-restore-configuration/backup-configuration.md +++ b/docs/reference-guides/backup-restore-configuration/backup-configuration.md @@ -2,8 +2,9 @@ title: Backup Configuration --- -The Backup Create page lets you configure a schedule, enable encryption and specify the storage location for your backups. +The **Backup: Create** page lets you configure a schedule, enable encryption and specify the storage location for your backups. +You must first [install](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) the `rancher-backup` operator. After you do so, you can access the **Backup: Create** page: ## Schedule @@ -76,7 +77,7 @@ If you use an S3 backup target, make sure that every cluster has its own bucket The S3 storage location contains the following configuration fields: -1. **Credential Secret** (optional): If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) The Credential Secret dropdown lists the secrets in all namespaces. +1. **Credential Secret** (optional): If you need an AWS access key or secret key to access an S3 bucket, [create a secret](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md) using your credentials, with keys and directives named `accessKey` and `secretKey`. The secret can be in any namespace. An example secret is [here](#example-credentialsecret). This directive is unnecessary if the nodes running your operator are in EC2 and assigned [IAM permissions to access S3](#iam-permissions-for-ec2-nodes-to-access-s3). The **Credential Secret** dropdown lists secrets in all namespaces. 1. **Bucket Name**: The name of the S3 bucket where backup files will be stored. 1. **Region** (optional): The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. This field isn't needed for configuring MinIO. 1. **Folder** (optional): The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported. 
If this field is left empty, the default behavior is to store the backup files in the root folder of the S3 bucket. @@ -85,9 +86,11 @@ The S3 storage location contains the following configuration fields: 1. **Skip TLS Verifications** (optional): Set to true if you are not using TLS. +#### YAML Directive Fields + | YAML Directive Name | Description | Required | | ---------------- | ---------------- | ------------ | -| `credentialSecretName` | If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace as long as you provide that namespace in `credentialSecretNamespace`. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | +| `credentialSecretName` | If you need an AWS access key or secret key to access an S3 bucket, [create a secret](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md) using your credentials, with keys and directives named `accessKey` and `secretKey`. The secret can be in any namespace. An example secret is [here](#example-credentialsecret). This directive is unnecessary if the nodes running your operator are in EC2 and assigned [IAM permissions to access S3](#iam-permissions-for-ec2-nodes-to-access-s3). | | | `credentialSecretNamespace` | The namespace of the secret containing the credentials to access S3. This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | | `bucketName` | The name of the S3 bucket where backup files will be stored. | ✓ | | `folder` | The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported. If this field is left empty, the default behavior is to store the backup files in the root folder of the S3 bucket. | | diff --git a/docs/reference-guides/backup-restore-configuration/examples.md b/docs/reference-guides/backup-restore-configuration/examples.md index 99bcfc2190f5..77f79460a64f 100644 --- a/docs/reference-guides/backup-restore-configuration/examples.md +++ b/docs/reference-guides/backup-restore-configuration/examples.md @@ -242,19 +242,6 @@ spec: encryptionConfigSecretName: test-encryptionconfig ``` -## Example Credential Secret for Storing Backups in S3 - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: creds -type: Opaque -data: - accessKey: - secretKey: -``` - ## Example EncryptionConfiguration The snippet below demonstrates two different types of secrets and their relevance with respect to Backup and Restore of custom resources. 
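The credential secret that both pages point to (`#example-credentialsecret`) is a plain Opaque Kubernetes secret whose `data` holds base64-encoded values under the `accessKey` and `secretKey` keys. A minimal sketch, where the name `creds` and the placeholder values are illustrative:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: creds
  namespace: default # any namespace works; reference it via credentialSecretNamespace
type: Opaque
data:
  accessKey: <base64-encoded access key>
  secretKey: <base64-encoded secret key>
```

Values under `data` must already be base64-encoded; to supply them as plain text instead, use the standard `stringData` field and let Kubernetes encode them on creation.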
From 6c332ea0e621c452c0149e02e06d5a8e6db7b4e5 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 13 Sep 2023 16:38:01 -0400 Subject: [PATCH 46/54] #795 Update issue templates, add PR template (#833) * added PR template, made all templates consistent with format seen in Release Notes repo template * Apply suggestions from code review Co-authored-by: Sunil Singh * Update .github/pull_request_template.md Co-authored-by: Billy Tat * Update request-an-update.md - spacing + connected > related * Update request-a-new-feature.md - consistent wording * updated PR template with link to readme, removed comment tags from reminder section as readme link wouldn't be clickable otherwise --------- Co-authored-by: Sunil Singh Co-authored-by: Billy Tat --- .../ISSUE_TEMPLATE/request-a-new-feature.md | 18 ++++++++++--- .github/ISSUE_TEMPLATE/request-an-update.md | 13 ++++++--- .github/pull_request_template.md | 27 +++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) create mode 100644 .github/pull_request_template.md diff --git a/.github/ISSUE_TEMPLATE/request-a-new-feature.md b/.github/ISSUE_TEMPLATE/request-a-new-feature.md index 23385bdcf31b..902c8387d0f8 100644 --- a/.github/ISSUE_TEMPLATE/request-a-new-feature.md +++ b/.github/ISSUE_TEMPLATE/request-a-new-feature.md @@ -4,13 +4,23 @@ about: For requesting new feature(s) to be added to the docs. title: '' labels: '' assignees: '' - --- -**Summary:** +## Related Issues + + + +## Summary + + +## Details -**Details:** + diff --git a/.github/ISSUE_TEMPLATE/request-an-update.md b/.github/ISSUE_TEMPLATE/request-an-update.md index 1a36aedfe533..7a9b24fd7e58 100644 --- a/.github/ISSUE_TEMPLATE/request-an-update.md +++ b/.github/ISSUE_TEMPLATE/request-an-update.md @@ -4,8 +4,15 @@ about: For fixing docs errors/typos, adding needed/missing information, updating title: '' labels: '' assignees: '' - --- -**Summary:** -Describe the requested update giving as much detail as possible. Please also list page link(s) in the current docs where the update applies. \ No newline at end of file +## Related Issues + + +## Summary + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000000..0f835da48dd5 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,27 @@ + + +Fixes #[issue_number] + +## Reminders + +- See the [README](../README.md) for more details on how to work with the Rancher docs. + +- Verify if changes pertain to other versions of Rancher. If they do, finalize the edits on one version of the page, then apply the edits to the other versions. + +- If the pull request is dependent on an upcoming release, make sure to target the release branch instead of `main`. 
+ +## Description + + + +## Comments + + \ No newline at end of file From a7c4d606ee79cfa976664ba421ff0dc798a48ec7 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Wed, 13 Sep 2023 16:39:08 -0400 Subject: [PATCH 47/54] #795 Update README w navigation and moving/renaming file info (#820) * Added page about moving/renaming docs, new headings for readme * added navigation section * revised README * revised moving-or-renaming-docs * Apply suggestions from code review Co-authored-by: Lucas Saintarbor * Update moving-or-renaming-docs.md - revamping schema example * json > js * typos, extra explanation * json > js * various changes --------- Co-authored-by: Lucas Saintarbor --- README.md | 27 +++++++++++---- moving-or-renaming-docs.md | 67 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 moving-or-renaming-docs.md diff --git a/README.md b/README.md index ddcf0d0a4fc0..1812e0d86f75 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,31 @@ -# Edit The Docs +# Contribute to Rancher Docs -To get started, fork and clone the rancher-docs repository. +Welcome to the [Rancher docs](https://ranchermanager.docs.rancher.com/) repository. See the [Rancher software](https://github.com/rancher/rancher) repository if you have questions or requests for the Rancher platform. + +## Make a Suggestion + +You can suggest changes to the Rancher docs in two ways: + +1. [Open an issue](https://github.com/rancher/rancher-docs/issues/new/choose). +1. Edit the docs in the way you see fit and open a pull request. + +## Edit the Docs + +To get started, [fork](https://github.com/rancher/rancher-docs/fork) and clone the rancher-docs repository. Our repository doesn't allow you to make changes directly to the `main` branch. Create a working branch and make pull requests from your fork to [rancher/rancher-docs](https://github.com/rancher/rancher-docs). For most updates, you'll need to edit a file in `/docs`, and the corresponding file in `/versioned_docs/version-2.7`. If a change affects older versions, you can find files documenting Rancher v2.0 and later in the `/versioned_docs` directory. -If a file is moved or renamed, you'll also need to edit the `sidebars.js` files for each version, and the list of redirects in `docusaurus.config.js`. +If a file is moved or renamed, you'll also need to edit the `sidebars.js` files for each version, and the list of redirects in `docusaurus.config.js`. See [Moving or Renaming Docs](./moving-or-renaming-docs.md). + +### Navigate the Repo + +The file paths in the repo correspond to the URLs for pages on the docs website. The docs for the latest version of Rancher are located in `/docs`. Most index pages are found within the `/pages-for-subheaders` directory in `/docs`. All images are in `/static/img` in the top level of the repo. Older docs are found within `/versioned_docs` and generally follow the same structure as the files in `/docs`. -## Style & Formatting +### Style & Formatting -The docs are written in [Markdown](https://www.markdownguide.org/getting-started/). We refer to the Microsoft [style guide](https://learn.microsoft.com/en-us/style-guide/welcome/) and generally use standard American English. Many pages are also available in Simplified Chinese. +The docs are written in [Markdown](https://www.markdownguide.org/getting-started/). We refer to the Microsoft [style guide](https://learn.microsoft.com/en-us/style-guide/welcome/) and use standard American English. Many pages are also available in Simplified Chinese. 
Every docs page contain metadata in the first few lines: @@ -22,7 +37,7 @@ title: Some Title The `title` is rendered as the page's headline. The site renderer wraps the `title` value in `H1` tags, which are equivalent to `#` in Markdown syntax. This means that all subsequent headers on the page should be second level (`##`) or more. -## Docs Website +## Run the Docs Website The Rancher Docs website is built with [Docusaurus 2](https://docusaurus.io/), a modern static website generator. diff --git a/moving-or-renaming-docs.md b/moving-or-renaming-docs.md new file mode 100644 index 000000000000..2f0d1c5b0d1a --- /dev/null +++ b/moving-or-renaming-docs.md @@ -0,0 +1,67 @@ +# Adding, Moving, or Renaming Docs + +Docusaurus generates sidebars based on a JSON file named `sidebars.js`. When you add a new doc, you need to add an entry to `sidebars.js`. Otherwise, the page won't appear in the list of sidebar topics. When you move or re-title a doc, you need to update `sidebars.js` to reflect the change to the doc's location or title. If you alter a docs file's path, by moving the file or editing the file's name, you'll also need to add a redirect to the new path in `docusaurus.config.js`. + +> **Note:** Avoid adding filenames that contain periods before the file extension (example: `rke2-self-assessment-guide-with-cis-v1.23-k8s-v1.25.md`). If necessary, use dashes instead of periods (`rke2-self-assessment-guide-with-cis-v1-23-k8s-v1-25.md`). + +## Sidebars + +The `sidebars.js` file for the latest version of Rancher is located in the top level of the repo. Versioned docs each have their own versioned sidebar, found within `/versioned_sidebars` in the top level of the repo. + +The schema for `sidebars.js` looks like this: + +```JS +sidebar: [ + "toplevel", + { + type: "category", + label: "Second Level", + items: [ + "second-level/overview", + { + type: "category", + label: "Topic One", + link: { + type: "doc", + id: "pages-for-subheaders/index-page-for-topic-one" + } + items: [ + "second-level/topic-one/page-a", + "second-level/topic-one/page-b", + ] + } + ] + } +] +``` + +Paths for docs files are listed within an `items` array. Their position within the file is similar to how they'll be organized within the published sidebar. + +If the doc is as an index page for a topic with numerous subtopic pages, its entry in `sidebars.js` should have extra metadata, such as `category`, `link`, and `label`. + +### Moving Index Pages + +Some entries in the published sidebar are clickable dropdown menus. When a reader clicks them, they reveal a list of subtopics. These dropdown menu entries are indicated by `type: category` in the sidebar file. When you select the entry on the published docs site, the menu opens and you will navigate to the page indicated in `link.id`. + +Docusaurus uses the `label` field to generate the text that appears on the dropdown menu in the sidebar. When you rename these index pages, you also need to update their `label` in the sidebar file. + +### Redirecting Pages + +When you move a page, update redirects in the `@docusaurus/plugin-client-redirects` field within the `docusaurus.config.js` file. This file is located in the top level of the repo. + +The schema for docs redirects looks like this: + +```JS + { + to: '/faq/general-faq', + from: '/faq' + } + { + to: '/v2.6/faq/general-faq', + from: '/v2.6/faq' + }, +``` + +Docusaurus redirects don't accept wildcards, so each path must be exact. This means that you must add individual redirects for each version of a doc. 
+ +Docusaurus also can't redirect pages whose filenames contain a period before the extension. You'll need to manually update any docset links to those pages. From f6a70125b123d83aea5a546ebdf561ec77253bb7 Mon Sep 17 00:00:00 2001 From: Tom Lebreux Date: Wed, 13 Sep 2023 18:02:02 -0400 Subject: [PATCH 48/54] Convert all helm-template instructions to helm-install (#828) * Adjust command to install cert-manager The directory `cert-manager` is not created when using `helm install` instead of `helm template --output-dir .`. Also, the CRDs are downloaded in the current directory, so adjust the commands as well. * Convert helm template to helm upgrade/install Since we're using Helm hooks, we cannot use `helm template`, otherwise things will break. The commands are updated to use `helm install` and `helm upgrade`. --- .../air-gapped-upgrades.md | 25 +++++-------------- .../install-rancher-ha.md | 4 +-- .../enable-experimental-features.md | 11 +++----- .../air-gapped-upgrades.md | 25 +++++-------------- .../install-rancher-ha.md | 4 +-- .../enable-experimental-features.md | 11 +++----- .../air-gapped-upgrades.md | 25 +++++-------------- .../install-rancher-ha.md | 4 +-- .../enable-experimental-features.md | 11 +++----- 9 files changed, 36 insertions(+), 84 deletions(-) diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index 5a9aed244877..7b01fb3aac23 100644 --- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -1,5 +1,5 @@ --- -title: Rendering the Helm Template in an Air-Gapped Environment +title: Upgrading in an Air-Gapped Environment --- @@ -12,9 +12,9 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Template Options +### Rancher Helm Upgrade Options -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. +To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. Based on the choice you made during installation, complete one of the procedures below. @@ -29,8 +29,7 @@ Placeholder | Description ### Option A: Default Self-signed Certificate ``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set certmanager.version= \ @@ -42,8 +41,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ ### Option B: Certificates from Files using Kubernetes Secrets ```plain -helm template rancher ./rancher-.tgz --output-dir . 
\ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -55,8 +53,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -66,16 +63,6 @@ helm template rancher ./rancher-.tgz --output-dir . \ --set useBundledSystemChart=true # Use the packaged Rancher system charts ``` -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - ## Verify the Upgrade Log into Rancher to confirm that the upgrade succeeded. diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index abf4a386d146..d09b5eca604c 100644 --- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -122,7 +122,7 @@ Copy the fetched charts to a system that has access to the Rancher server cluste ##### 1. Install Cert-Manager -Install cert-manager with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. +Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. :::note @@ -144,7 +144,7 @@ If you are using self-signed certificates, install cert-manager: 2. Create the cert-manager CustomResourceDefinitions (CRDs). ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml + kubectl apply -f cert-manager-crd.yaml ``` 3. Install cert-manager. diff --git a/docs/pages-for-subheaders/enable-experimental-features.md b/docs/pages-for-subheaders/enable-experimental-features.md index 40c722001f17..0e5ad8636085 100644 --- a/docs/pages-for-subheaders/enable-experimental-features.md +++ b/docs/pages-for-subheaders/enable-experimental-features.md @@ -55,17 +55,14 @@ If you are installing an alpha version, Helm requires adding the `--devel` optio ::: -### Rendering the Helm Chart for Air Gap Installations +### Enabling Features for Air Gap Installs -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. 
For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) +To perform an [air gap installation of Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md), add a Helm chart repository and download a Helm chart, then install Rancher with Helm. -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm command is as follows: +When you install the Helm chart, you should pass in feature flag names in a comma separated list, as in the following example: ``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index 9ac190372a30..a02bf280cd17 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -1,5 +1,5 @@ --- -title: Rendering the Helm Template in an Air-Gapped Environment +title: Upgrading in an Air-Gapped Environment --- @@ -12,9 +12,9 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Template Options +### Rancher Helm Upgrade Options -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. +To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. Based on the choice you made during installation, complete one of the procedures below. @@ -29,8 +29,7 @@ Placeholder | Description ### Option A: Default Self-signed Certificate ``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set certmanager.version= \ @@ -42,8 +41,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ ### Option B: Certificates from Files using Kubernetes Secrets ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -55,8 +53,7 @@ helm template rancher ./rancher-.tgz --output-dir . 
\ If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -66,16 +63,6 @@ helm template rancher ./rancher-.tgz --output-dir . \ --set useBundledSystemChart=true # Use the packaged Rancher system charts ``` -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - ## Verify the Upgrade Log into Rancher to confirm that the upgrade succeeded. diff --git a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index bbb4f55383bb..bbe3d40328dc 100644 --- a/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.6/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -117,7 +117,7 @@ Copy the fetched charts to a system that has access to the Rancher server cluste ##### 1. Install Cert-Manager -Install cert-manager with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. +Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. :::note @@ -139,7 +139,7 @@ If you are using self-signed certificates, install cert-manager: 2. Create the cert-manager CustomResourceDefinitions (CRDs). ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml + kubectl apply -f cert-manager-crd.yaml ``` 3. Install cert-manager. diff --git a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md index 4e9cd6a601a1..30d1d178b2e8 100644 --- a/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.6/pages-for-subheaders/enable-experimental-features.md @@ -53,17 +53,14 @@ If you are installing an alpha version, Helm requires adding the `--devel` optio ::: -### Rendering the Helm Chart for Air Gap Installations +### Enabling Features for Air Gap Installs -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) +To perform an [air gap installation of Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md), add a Helm chart repository and download a Helm chart, then install Rancher with Helm. 
-Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm command is as follows: +When you install the Helm chart, you should pass in feature flag names in a comma separated list, as in the following example: ``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md index 5a9aed244877..7b01fb3aac23 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -1,5 +1,5 @@ --- -title: Rendering the Helm Template in an Air-Gapped Environment +title: Upgrading in an Air-Gapped Environment --- @@ -12,9 +12,9 @@ These instructions assume you have already followed the instructions for a Kuber ::: -### Rancher Helm Template Options +### Rancher Helm Upgrade Options -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. +To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. Based on the choice you made during installation, complete one of the procedures below. @@ -29,8 +29,7 @@ Placeholder | Description ### Option A: Default Self-signed Certificate ``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set certmanager.version= \ @@ -42,8 +41,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ ### Option B: Certificates from Files using Kubernetes Secrets ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -55,8 +53,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated +helm upgrade rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -66,16 +63,6 @@ helm template rancher ./rancher-.tgz --output-dir . 
\ --set useBundledSystemChart=true # Use the packaged Rancher system charts ``` -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - ## Verify the Upgrade Log into Rancher to confirm that the upgrade succeeded. diff --git a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index abf4a386d146..d09b5eca604c 100644 --- a/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md +++ b/versioned_docs/version-2.7/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -122,7 +122,7 @@ Copy the fetched charts to a system that has access to the Rancher server cluste ##### 1. Install Cert-Manager -Install cert-manager with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. +Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. :::note @@ -144,7 +144,7 @@ If you are using self-signed certificates, install cert-manager: 2. Create the cert-manager CustomResourceDefinitions (CRDs). ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml + kubectl apply -f cert-manager-crd.yaml ``` 3. Install cert-manager. diff --git a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md index 40c722001f17..0e5ad8636085 100644 --- a/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md +++ b/versioned_docs/version-2.7/pages-for-subheaders/enable-experimental-features.md @@ -55,17 +55,14 @@ If you are installing an alpha version, Helm requires adding the `--devel` optio ::: -### Rendering the Helm Chart for Air Gap Installations +### Enabling Features for Air Gap Installs -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) +To perform an [air gap installation of Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md), add a Helm chart repository and download a Helm chart, then install Rancher with Helm. -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm command is as follows: +When you install the Helm chart, you should pass in feature flag names in a comma separated list, as in the following example: ``` -helm template rancher ./rancher-.tgz --output-dir . 
\ - --no-hooks \ # prevent files for Helm hooks from being generated +helm install rancher ./rancher-.tgz \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ From bcfd5b25aa8a74f6f719949393344f32e4c322e0 Mon Sep 17 00:00:00 2001 From: Jiaqi Luo <6218999+jiaqiluo@users.noreply.github.com> Date: Wed, 13 Sep 2023 16:05:50 -0700 Subject: [PATCH 49/54] Add the instruction for enable API audit log in downstream clusters (#824) --- ...le-api-audit-log-in-downstream-clusters.md | 142 +++++++ .../k3s-cluster-configuration.md | 353 ++++++++++++++++-- .../rke2-cluster-configuration.md | 232 ++++++++---- sidebars.js | 1 + ...le-api-audit-log-in-downstream-clusters.md | 142 +++++++ .../k3s-cluster-configuration.md | 353 ++++++++++++++++-- .../rke2-cluster-configuration.md | 232 ++++++++---- versioned_sidebars/version-2.7-sidebars.json | 1 + 8 files changed, 1250 insertions(+), 206 deletions(-) create mode 100644 docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md create mode 100644 versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md diff --git a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md new file mode 100644 index 000000000000..ab18d0a7b391 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md @@ -0,0 +1,142 @@ +--- +title: Enabling the API Audit Log in Downstream Clusters +--- + + + + + +Kubernetes auditing provides a security-relevant chronological set of records about a cluster. Kube-apiserver performs auditing. Requests generate an event at each stage of its execution, which is then preprocessed according to a certain policy and written to a backend. The policy determines what’s recorded and the backend persists the records. + +You might want to configure the audit log as part of compliance with the Center for Internet Security (CIS) Kubernetes Benchmark controls. + +For configuration details, refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/). + + + + + +:::note + +This feature is available in Rancher v2.7.2 and above. + +::: + +As a prerequisite, you need to create a secret or configmap which will be the source of the audit policy. + +The secret or configmap must meet the following two requirements: + +1. It must be in the `fleet-default` namespace where the Cluster object exists. +2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2` which permits the target clusters to use it. + +:::tip + +Rancher Dashboard provides an easy-to-use form for creating the secret or configmap. + +::: + +Example: + +```yaml +apiVersion: v1 +data: + audit-policy: >- + IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE= +kind: Secret +metadata: + annotations: + rke.cattle.io/object-authorized-for-clusters: cluster1 + name: name1 + namespace: fleet-default +``` + +The audit log can be enabled and configured by editing the cluster in YAML and utilizing the `machineSelectorFiles` and `machineGlobalConfig` directives. 
+ +Example: + +```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +spec: + rkeConfig: + machineGlobalConfig: + kube-apiserver-arg: + - audit-policy-file=/dev-audit-policy.yaml + - audit-log-path=/dev-audit.logs + machineSelectorFiles: + - fileSources: + - configMap: + name: '' + secret: + items: + - key: audit-policy + path: /dev-audit-policy.yaml + name: dev-audit-policy + machineLabelSelector: + matchLabels: + rke.cattle.io/control-plane-role: 'true' +``` + +For more information about cluster configuration, refer to the REK2 or K3s cluster configuration reference pages. + + + + + +The audit log can be enabled and configured by editing the cluster with YAML. + +When the audit log is enabled, RKE1 default values will be applied. + +```yaml +# +# Rancher Config +# +rancher_kubernetes_engine_config: + services: + kube-api: + audit_log: + enabled: true +``` + +You can customize the audit log by using the configuration directive. + +```yaml +# +# Rancher Config +# +rancher_kubernetes_engine_config: + services: + kube-api: + audit_log: + enabled: true + configuration: + max_age: 6 + max_backup: 6 + max_size: 110 + path: /var/log/kube-audit/audit-log.json + format: json + policy: + apiVersion: audit.k8s.io/v1 # This is required. + kind: Policy + omitStages: + - "RequestReceived" + rules: + # Log pod changes at RequestResponse level + - level: RequestResponse + resources: + - group: "" + # Resource "pods" doesn't match requests to any subresource of pods, + # which is consistent with the RBAC policy. + resources: ["pods"] + # Log "pods/log", "pods/status" at Metadata level + - level: Metadata + resources: + - group: "" + resources: ["pods/log", "pods/status"] +``` + +For configuration details, refer to the official [RKE1 documentation](https://rke.docs.rancher.com/config-options/audit-log). + + + + diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md index 79fadd9816da..ca377342025d 100644 --- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md @@ -15,94 +15,158 @@ You can configure the Kubernetes options one of two ways: - [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. - [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create a K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in an K3s installation. +## Editing Clusters in the Rancher UI + +The Rancher UI provides two ways to edit a cluster: +1. With a form. +1. With YAML. + +### Editing Clusters with a Form + +The form covers the most frequently needed options for clusters. + +To edit your cluster, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster you want to configure and click **⋮ > Edit Config**. + +### Editing Clusters in YAML + +For a complete reference of configurable options for K3s clusters in YAML, see the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +To edit your cluster with YAML: + +1. Click **☰ > Cluster Management**. +1. 
Go to the cluster you want to configure and click **⋮ > Edit as YAML**. +1. Edit the RKE options under the `rkeConfig` directive. + ## Configuration Options in the Rancher UI -:::tip +### Machine Pool Configuration -Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the K3s cluster configuration file in YAML. For the complete reference of configurable options for K3s clusters in YAML, see the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/install-options/) +This subsection covers generic machine pool configurations. For specific infrastructure provider configurations, refer to the following: -::: +- [Azure](../downstream-cluster-configuration/machine-configuration/azure.md) +- [DigitalOcean](../downstream-cluster-configuration/machine-configuration/digitalocean.md) +- [EC2](../downstream-cluster-configuration/machine-configuration/amazon-ec2.md) + +##### Pool Name + +The name of the machine pool. + +##### Machine Count + +The number of machines in the pool. + +##### Roles + +Option to assign etcd, control plane, and worker roles to nodes. + +#### Advanced -### Basics -#### Kubernetes Version +##### Auto Replace + +The amount of time nodes can be unreachable before they are automatically deleted and replaced. + +##### Drain Before Delete + +Enables draining nodes by evicting all pods before the node is deleted. + +##### Kubernetes Node Labels + +Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to nodes to help with organization and object selection. + +For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +##### Taints + +Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, to prevent pods from being scheduled to or executed on the nodes, unless the pods have matching tolerations. + +### Cluster Configuration +#### Basics +##### Kubernetes Version The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). -#### Encrypt Secrets +##### Pod Security Admission Configuration Template + +The default [pod security admission configuration template](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md) for the cluster. + +##### Encrypt Secrets Option to enable or disable secrets encryption. When enabled, secrets will be encrypted using a AES-CBC key. If disabled, any previously secrets will not be readable until encryption is enabled again. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/advanced/#secrets-encryption-config-experimental) for details. -#### Project Network Isolation +##### Project Network Isolation If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. -#### SELinux +##### SELinux Option to enable or disable [SELinux](https://rancher.com/docs/k3s/latest/en/advanced/#selinux-support) support. -#### CoreDNS +##### CoreDNS By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, an alternate DNS provider must be installed yourself. 
Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#coredns) for details.. -#### Klipper Service LB +##### Klipper Service LB Option to enable or disable the [Klipper](https://github.com/rancher/klipper-lb) service load balancer. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) for details. -#### Traefik Ingress +##### Traefik Ingress Option to enable or disable the [Traefik](https://traefik.io/) HTTP reverse proxy and load balancer. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#traefik-ingress-controller). -#### Local Storage +##### Local Storage Option to enable or disable [local storage](https://rancher.com/docs/k3s/latest/en/storage/) on the node(s). -#### Metrics Server +##### Metrics Server Option to enable or disable the [metrics server](https://github.com/kubernetes-incubator/metrics-server). If enabled, ensure port 10250 is opened for inbound TCP traffic. -### Add-On Config +#### Add-On Config Additional Kubernetes manifests, managed as a [Add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/helm/#automatically-deploying-manifests-and-helm-charts) for details. -### Agent Environment Vars +#### Agent Environment Vars Option to set environment variables for [K3s agents](https://rancher.com/docs/k3s/latest/en/architecture/). The environment variables can be set using key value pairs. Refer to the [K3 documentation](https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/) for more details. -### etcd +#### etcd -#### Automatic Snapshots +##### Automatic Snapshots Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots. For details, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/backup-restore/#creating-snapshots). -#### Metrics +##### Metrics Option to choose whether to expose etcd metrics to the public or only within the cluster. -### Networking +#### Networking -#### Cluster CIDR +##### Cluster CIDR IPv4/IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). -#### Service CIDR +##### Service CIDR IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). -#### Cluster DNS +##### Cluster DNS IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10). -#### Cluster Domain +##### Cluster Domain Select the domain for the cluster. The default is `cluster.local`. -#### NodePort Service Port Range +##### NodePort Service Port Range Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. -#### Truncate Hostnames +##### Truncate Hostnames Option to truncate hostnames to 15 characters or less. You can only set this field during the initial creation of the cluster. You can't enable or disable the 15 character limit after cluster creation. @@ -110,11 +174,11 @@ This setting only affects machine-provisioned clusters. Since custom clusters se Truncating hostnames in a cluster improves compatibility with Windows-based systems. Although Kubernetes allows hostnames up to 63 characters in length, systems that use NetBIOS restrict hostnames to 15 characters or less. 
-#### TLS Alternate Names +##### TLS Alternate Names Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. -#### Authorized Cluster Endpoint +##### Authorized Cluster Endpoint Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. @@ -122,34 +186,249 @@ For more detail on how an authorized cluster endpoint works and why it is used, We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint-ace) -### Registries +#### Registries Select the image repository to pull Rancher images from. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/). -### Upgrade Strategy +#### Upgrade Strategy -#### Controle Plane Concurrency +##### Control Plane Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Worker Concurrency +##### Worker Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Drain Nodes (Control Plane) +##### Drain Nodes (Control Plane) Option to remove all pods from the node prior to upgrading. -#### Drain Nodes (Worker Nodes) +##### Drain Nodes (Worker Nodes) Option to remove all pods from the node prior to upgrading. -### Advanced +#### Advanced Option to set kubelet options for different nodes. For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). -## Cluster Config File +## Cluster Config File Reference + +Editing clusters in YAML allows you to set configurations that are already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as set Rancher-specific parameters. + +
+ + Example Cluster Config File Snippet + + +```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +spec: + cloudCredentialSecretName: cattle-global-data:cc-fllv6 + clusterAgentDeploymentCustomization: {} + fleetAgentDeploymentCustomization: {} + kubernetesVersion: v1.26.7+k3s1 + localClusterAuthEndpoint: {} + rkeConfig: + chartValues: {} + etcd: + snapshotRetention: 5 + snapshotScheduleCron: 0 */5 * * * + machineGlobalConfig: + disable-apiserver: false + disable-cloud-controller: false + disable-controller-manager: false + disable-etcd: false + disable-kube-proxy: false + disable-network-policy: false + disable-scheduler: false + etcd-expose-metrics: false + kube-apiserver-arg: + - audit-policy-file=/etc/rancher/k3s/user-audit-policy.yaml + - audit-log-path=/etc/rancher/k3s/user-audit.logs + profile: null + secrets-encryption: false + machinePools: + - controlPlaneRole: true + etcdRole: true + machineConfigRef: + kind: Amazonec2Config + name: nc-test-pool1-pwl5h + name: pool1 + quantity: 1 + unhealthyNodeTimeout: 0s + workerRole: true + machineSelectorConfig: + - config: + docker: false + protect-kernel-defaults: false + selinux: false + machineSelectorFiles: + - fileSources: + - configMap: + name: '' + secret: + name: audit-policy + items: + - key: audit-policy + path: /etc/rancher/k3s/user-audit-policy.yaml + machineLabelSelector: + matchLabels: + rke.cattle.io/control-plane-role: 'true' + registries: {} + upgradeStrategy: + controlPlaneConcurrency: '1' + controlPlaneDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 + workerConcurrency: '1' + workerDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 +``` +
+ +### chartValues + +Specify the values for the system charts installed by K3s. + +Example: + +```yaml +chartValues: + chart-name: + key: value +``` +### machineGlobalConfig + +Specify K3s configurations. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of k3s](https://docs.k3s.io/cli/server) can be applied here. + +Example: + +```yaml +machineGlobalConfig: + etcd-arg: + - key1=value1 + - key2=value2 +``` + +### machineSelectorConfig + +`machineSelectorConfig` is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. + +Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this field has the same effect as putting the config in the `machineGlobalConfig` section. + +Example: + +```yaml +machineSelectorConfig + - config: + config-key: config-value + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` +### machineSelectorFiles + +:::note + +This feature is available in Rancher v2.7.2 and later. + +::: + +Deliver files to nodes, so that the files can be in place before initiating K3s server or agent processes. +The content of the file is retrieved from either a secret or a configmap. The target nodes are filtered by the `machineLabelSelector`. + +Example : + +```yaml +machineSelectorFiles: + - fileSources: + - secret: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-secret-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 + - fileSources: + - configMap: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-configmap-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` + +The secret or configmap must meet the following requirements: + +1. It must be in the `fleet-default` namespace where the Cluster object exists. +2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2`, which permits the target clusters to use it. -Instead of using the Rancher UI forms to choose Kubernetes options for the cluster, advanced users can create an K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in an K3s installation. +:::tip + +Rancher Dashboard provides an easy-to-use form for creating the secret or configmap. + +::: -To edit an K3s config file directly from the Rancher UI, click **Edit as YAML**. 
+Example:
+
+```yaml
+apiVersion: v1
+data:
+  audit-policy: >-
+    IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE=
+kind: Secret
+metadata:
+  annotations:
+    rke.cattle.io/object-authorized-for-clusters: cluster1
+  name: name1
+  namespace: fleet-default
+```
diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index c279eccf9786..116b93799474 100644
--- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -15,18 +15,26 @@ You can configure the Kubernetes options in one of the two following ways:
- [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster.
- [Cluster Config File](#cluster-config-file-reference): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set many additional [options](https://docs.rke2.io/install/configuration) available for an RKE2 installation.
-## Editing Clusters with a Form in the Rancher UI
+## Editing Clusters in the Rancher UI
+
+The Rancher UI provides two ways to edit a cluster:
+1. With a form.
+1. With YAML.
+
+### Editing Clusters with a Form
+
+The form covers the most frequently needed options for clusters.
To edit your cluster,
-1. In the upper left corner, click **☰ > Cluster Management**.
+1. Click **☰ > Cluster Management**.
1. Go to the cluster you want to configure and click **⋮ > Edit Config**.
-## Editing Clusters with YAML
+### Editing Clusters in YAML
-Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set any of the options available in an RKE2 installation by specifying them in YAML.
+For a complete reference of configurable options for RKE2 clusters in YAML, see the [RKE2 documentation.](https://docs.rke2.io/install/configuration)
-To edit an RKE2 config file directly from the Rancher UI,
+To edit your cluster in YAML:
1. Click **☰ > Cluster Management**.
1. Go to the cluster you want to configure and click **⋮ > Edit as YAML**.
@@ -34,62 +42,55 @@ To edit an RKE2 config file directly from the Rancher UI,
## Configuration Options in the Rancher UI
-:::tip
-
-Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE2 cluster configuration file in YAML. For the complete reference of configurable options for RKE2 Kubernetes clusters in YAML, see the [RKE2 documentation.](https://docs.rke2.io/install/configuration)
-
-:::
-
-## Machine Pool
+### Machine Pool Configuration
-This subsection covers the generic machine pool configurations. For infrastructure provider specific, configurations refer to the following pages:
+This subsection covers generic machine pool configurations.
For specific infrastructure provider configurations, refer to the following: - [Azure](../downstream-cluster-configuration/machine-configuration/azure.md) - [DigitalOcean](../downstream-cluster-configuration/machine-configuration/digitalocean.md) - [EC2](../downstream-cluster-configuration/machine-configuration/amazon-ec2.md) -### Pool Name +##### Pool Name The name of the machine pool. -### Machine Count +##### Machine Count The number of machines in the pool. -### Roles +##### Roles Option to assign etcd, control plane, and worker roles to nodes. -### Advanced +#### Advanced -#### Auto Replace +##### Auto Replace -The duration nodes can be unreachable before they are automatically deleted and replaced. +The amount of time nodes can be unreachable before they are automatically deleted and replaced. -#### Drain Before Delete +##### Drain Before Delete Enables draining nodes by evicting all pods before the node is deleted. -#### Kubernetes Node Labels +##### Kubernetes Node Labels Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to nodes to help with organization and object selection. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) -#### Taints +##### Taints -Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, which can be used to prevent pods from being scheduled to or executed on nodes, unless the pods have matching tolerations. +Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, to prevent pods from being scheduled to or executed on the nodes, unless the pods have matching tolerations. -## Cluster Configuration - -### Basics -#### Kubernetes Version +### Cluster Configuration +#### Basics +##### Kubernetes Version The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). -#### Container Network Provider +##### Container Network Provider The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. @@ -110,7 +111,7 @@ Out of the box, Rancher is compatible with the following network providers: For more details on the different networking providers and how to configure them, please view our [RKE2 documentation](https://docs.rke2.io/install/network_options). -##### Dual-stack Networking +###### Dual-stack Networking [Dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) networking is supported for all CNI providers. To configure RKE2 in dual-stack mode, set valid IPv4/IPv6 CIDRs for your [Cluster CIDR](#cluster-cidr) and/or [Service CIDR](#service-cidr). @@ -118,7 +119,7 @@ For more details on the different networking providers and how to configure them When using `cilium` or `multus,cilium` as your container network interface provider, ensure the **Enable IPv6 Support** option is also enabled. -#### Cloud Provider +##### Cloud Provider You can configure a [Kubernetes cloud provider](../../../pages-for-subheaders/set-up-cloud-providers.md). 
If you want to use dynamically provisioned [volumes and storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. @@ -128,89 +129,93 @@ If the cloud provider you want to use is not listed as an option, you will need ::: -#### Default Pod Security Policy +##### Default Pod Security Policy + +The default [pod security policy](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. Please refer to the [RKE2 documentation](https://docs.rke2.io/security/pod_security_policies) on the specifications of each available policy. -Choose the default [pod security policy](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. Please refer to the [RKE2 documentation](https://docs.rke2.io/security/pod_security_policies) on the specifications of each available policy. +##### Pod Security Admission Configuration Template -#### Worker CIS Profile +The default [pod security admission configuration template](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md) for the cluster. + +##### Worker CIS Profile Select a [CIS benchmark](../../../pages-for-subheaders/cis-scan-guides.md) to validate the system configuration against. -#### Project Network Isolation +##### Project Network Isolation If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. Project network isolation is available if you are using any RKE2 network plugin that supports the enforcement of Kubernetes network policies, such as Canal. -#### CoreDNS +##### CoreDNS By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, an alternate DNS provider must be installed yourself. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#coredns) for additional CoreDNS configurations. -#### NGINX Ingress +##### NGINX Ingress If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options. -#### Metrics Server +##### Metrics Server Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). Each cloud provider capable of launching a cluster using RKE2 can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. -### Add-On Config +#### Add-On Config Additional Kubernetes manifests, managed as an [Add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [RKE2 documentation](https://docs.rke2.io/helm#automatically-deploying-manifests-and-helm-charts) for details. 
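+
+As an illustration, an add-on manifest can be an ordinary Kubernetes object or a `HelmChart` resource handled by the Helm controller bundled with RKE2. The sketch below is only an example: the repository URL, chart name, and namespaces are placeholders.
+
+```yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  # Placeholder name for the chart deployment.
+  name: example-chart
+  namespace: kube-system
+spec:
+  # Placeholder Helm repository and chart.
+  repo: https://charts.example.com
+  chart: example-chart
+  targetNamespace: example-apps
+  valuesContent: |-
+    replicaCount: 2
+```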
-### Agent Environment Vars +#### Agent Environment Vars Option to set environment variables for [Rancher agents](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. -### etcd +#### etcd -#### Automatic Snapshots +##### Automatic Snapshots Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots. For details, refer to the [RKE2 documentation](https://docs.rke2.io/backup_restore#creating-snapshots). Note that with RKE2, snapshots are stored on each etcd node. This varies from RKE1 which only stores one snapshot per cluster. -#### Metrics +##### Metrics Option to choose whether to expose etcd metrics to the public or only within the cluster. -### Networking +#### Networking -#### Cluster CIDR +##### Cluster CIDR IPv4 and/or IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). -##### Dual-stack Networking +###### Dual-stack Networking To configure [dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. [Additional configuration](#dual-stack-additional-config) is required when using `cilium` or `multus,cilium` as your [container network](#container-network-provider) interface provider. -#### Service CIDR +##### Service CIDR IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). -##### Dual-stack Networking +###### Dual-stack Networking To configure [dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. [Additional configuration](#dual-stack-additional-config) is required when using `cilium ` or `multus,cilium` as your [container network](#container-network-provider) interface provider. -#### Cluster DNS +##### Cluster DNS IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10). -#### Cluster Domain +##### Cluster Domain Select the domain for the cluster. The default is `cluster.local`. -#### NodePort Service Port Range +##### NodePort Service Port Range Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. -#### Truncate Hostnames +##### Truncate Hostnames Option to truncate hostnames to 15 characters or less. You can only set this field during the initial creation of the cluster. You can't enable or disable the 15 character limit after cluster creation. @@ -218,11 +223,11 @@ This setting only affects machine-provisioned clusters. Since custom clusters se Truncating hostnames in a cluster improves compatibility with Windows-based systems. Although Kubernetes allows hostnames up to 63 characters in length, systems that use NetBIOS restrict hostnames to 15 characters or less. -#### TLS Alternate Names +##### TLS Alternate Names Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. -#### Authorized Cluster Endpoint +##### Authorized Cluster Endpoint Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. 
@@ -232,35 +237,35 @@ For more detail on how an authorized cluster endpoint works and why it is used, We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint-ace) -### Registries +#### Registries Select the image repository to pull Rancher images from. For more details and configuration options, see the [RKE2 documentation](https://docs.rke2.io/install/containerd_registry_configuration). -### Upgrade Strategy +#### Upgrade Strategy -#### Control Plane Concurrency +##### Control Plane Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Worker Concurrency +##### Worker Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Drain Nodes (Control Plane) +##### Drain Nodes (Control Plane) Option to remove all pods from the node prior to upgrading. -#### Drain Nodes (Worker Nodes) +##### Drain Nodes (Worker Nodes) Option to remove all pods from the node prior to upgrading. -### Advanced +#### Advanced Option to set kubelet options for different nodes. For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). ## Cluster Config File Reference -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create a config file. Using a config file allows you to set the [options available](https://docs.rke2.io/install/configuration) in an RKE2 installation, including those already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as Rancher-specific parameters. +Editing clusters in YAML allows you to set the [options available](https://docs.rke2.io/install/configuration) in an RKE2 installation, including those already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as set Rancher-specific parameters.
@@ -268,9 +273,11 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad ```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster spec: cloudCredentialSecretName: cattle-global-data:cc-s879v - kubernetesVersion: v1.23.6+rke2r2 + kubernetesVersion: v1.25.12+rke2r1 localClusterAuthEndpoint: {} rkeConfig: chartValues: @@ -283,6 +290,9 @@ spec: disable-kube-proxy: false etcd-expose-metrics: false profile: null + kube-apiserver-arg: + - audit-policy-file=/etc/rancher/rke2/user-audit-policy.yaml + - audit-log-path=/etc/rancher/rke2/user-audit.logs machinePools: - controlPlaneRole: true etcdRole: true @@ -296,6 +306,18 @@ spec: machineSelectorConfig: - config: protect-kernel-defaults: false + machineSelectorFiles: + - fileSources: + - configMap: + name: '' + secret: + name: audit-policy + items: + - key: audit-policy + path: /etc/rancher/rke2/user-audit-policy.yaml + machineLabelSelector: + matchLabels: + rke.cattle.io/control-plane-role: 'true' registries: {} upgradeStrategy: controlPlaneConcurrency: "1" @@ -317,7 +339,7 @@ spec: ### chartValues -Option to specify the values for the system charts installed by RKE2/k3s. +Specify the values for the system charts installed by RKE2. Example: @@ -328,7 +350,7 @@ chartValues: ``` ### machineGlobalConfig -The RKE2/K3s configurations are nested under the `machineGlobalConfig` directive. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of RKE2](https://docs.rke2.io/reference/server_config) can be applied here. +Specify RKE2 configurations. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of RKE2](https://docs.rke2.io/reference/server_config) can be applied here. Example: @@ -341,9 +363,9 @@ machineGlobalConfig: ### machineSelectorConfig -This is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. +`machineSelectorConfig` is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. -Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this has the same effect as putting the config in the `machineGlobalConfig` section. +Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this field has the same effect as putting the config in the `machineGlobalConfig` section. Example: @@ -362,3 +384,81 @@ machineSelectorConfig key1: value1 key2: value2 ``` +### machineSelectorFiles + +:::note + +This feature is available in Rancher v2.7.2 and later. + +::: + +Deliver files to nodes, so that the files can be in place before initiating RKE2 server or agent processes. +The content of the file is retrieved from either a secret or a configmap. The target nodes are filtered by the `machineLabelSelector`. 
+ +Example : + +```yaml +machineSelectorFiles: + - fileSources: + - secret: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-secret-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 + - fileSources: + - configMap: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-configmap-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` + +The secret or configmap must meet the following requirements: + +1. It must be in the `fleet-default` namespace where the Cluster object exists. +2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2`, which permits the target clusters to use it. + +:::tip + +Rancher Dashboard provides an easy-to-use form for creating the secret or configmap. + +::: + +Example: + +```yaml +apiVersion: v1 +data: + audit-policy: >- + IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE= +kind: Secret +metadata: + annotations: + rke.cattle.io/object-authorized-for-clusters: cluster1 + name: name1 + namespace: fleet-default +``` diff --git a/sidebars.js b/sidebars.js index 590e45ab32f8..d9c5f34deb4e 100644 --- a/sidebars.js +++ b/sidebars.js @@ -800,6 +800,7 @@ const sidebars = { "how-to-guides/advanced-user-guides/open-ports-with-firewalld", "how-to-guides/advanced-user-guides/tune-etcd-for-large-installs", "how-to-guides/advanced-user-guides/enable-api-audit-log", + "how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters", "how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer", ] } diff --git a/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md new file mode 100644 index 000000000000..ab18d0a7b391 --- /dev/null +++ b/versioned_docs/version-2.7/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md @@ -0,0 +1,142 @@ +--- +title: Enabling the API Audit Log in Downstream Clusters +--- + + + + + +Kubernetes auditing provides a security-relevant chronological set of records about a cluster. Kube-apiserver performs auditing. Requests generate an event at each stage of its execution, which is then preprocessed according to a certain policy and written to a backend. The policy determines what’s recorded and the backend persists the records. + +You might want to configure the audit log as part of compliance with the Center for Internet Security (CIS) Kubernetes Benchmark controls. + +For configuration details, refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/). + + + + + +:::note + +This feature is available in Rancher v2.7.2 and above. + +::: + +As a prerequisite, you need to create a secret or configmap which will be the source of the audit policy. 
+
+The secret or configmap must meet the following two requirements:
+
+1. It must be in the `fleet-default` namespace where the Cluster object exists.
+2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2`, which permits the target clusters to use it.
+
+:::tip
+
+Rancher Dashboard provides an easy-to-use form for creating the secret or configmap.
+
+:::
+
+Example:
+
+```yaml
+apiVersion: v1
+data:
+  audit-policy: >-
+    IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE=
+kind: Secret
+metadata:
+  annotations:
+    rke.cattle.io/object-authorized-for-clusters: cluster1
+  name: name1
+  namespace: fleet-default
+```
+
+The audit log can be enabled and configured by editing the cluster in YAML and using the `machineSelectorFiles` and `machineGlobalConfig` directives.
+
+Example:
+
+```yaml
+apiVersion: provisioning.cattle.io/v1
+kind: Cluster
+spec:
+  rkeConfig:
+    machineGlobalConfig:
+      kube-apiserver-arg:
+        - audit-policy-file=/dev-audit-policy.yaml
+        - audit-log-path=/dev-audit.logs
+    machineSelectorFiles:
+      - fileSources:
+          - configMap:
+              name: ''
+            secret:
+              items:
+                - key: audit-policy
+                  path: /dev-audit-policy.yaml
+              name: dev-audit-policy
+        machineLabelSelector:
+          matchLabels:
+            rke.cattle.io/control-plane-role: 'true'
+```
+
+For more information about cluster configuration, refer to the RKE2 or K3s cluster configuration reference pages.
+
+
+
+
+The audit log can be enabled and configured by editing the cluster with YAML.
+
+When the audit log is enabled, RKE1 default values will be applied.
+
+```yaml
+#
+# Rancher Config
+#
+rancher_kubernetes_engine_config:
+  services:
+    kube-api:
+      audit_log:
+        enabled: true
+```
+
+You can customize the audit log by using the configuration directive.
+
+```yaml
+#
+# Rancher Config
+#
+rancher_kubernetes_engine_config:
+  services:
+    kube-api:
+      audit_log:
+        enabled: true
+        configuration:
+          max_age: 6
+          max_backup: 6
+          max_size: 110
+          path: /var/log/kube-audit/audit-log.json
+          format: json
+          policy:
+            apiVersion: audit.k8s.io/v1 # This is required.
+            kind: Policy
+            omitStages:
+              - "RequestReceived"
+            rules:
+              # Log pod changes at RequestResponse level
+              - level: RequestResponse
+                resources:
+                  - group: ""
+                    # Resource "pods" doesn't match requests to any subresource of pods,
+                    # which is consistent with the RBAC policy.
+                    resources: ["pods"]
+              # Log "pods/log", "pods/status" at Metadata level
+              - level: Metadata
+                resources:
+                  - group: ""
+                    resources: ["pods/log", "pods/status"]
+```
+
+For configuration details, refer to the official [RKE1 documentation](https://rke.docs.rancher.com/config-options/audit-log).
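+
+Whichever Kubernetes distribution you use, the audit policy itself is ordinary Kubernetes configuration. For reference, the base64-encoded `audit-policy` value used in the secret examples earlier on this page decodes to the following minimal policy, which records every request at the Metadata level:
+
+```yaml
+# Log all requests at the Metadata level.
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+- level: Metadata
+```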
+ + + + diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md index 79fadd9816da..ca377342025d 100644 --- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md +++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md @@ -15,94 +15,158 @@ You can configure the Kubernetes options one of two ways: - [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. - [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create a K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in an K3s installation. +## Editing Clusters in the Rancher UI + +The Rancher UI provides two ways to edit a cluster: +1. With a form. +1. With YAML. + +### Editing Clusters with a Form + +The form covers the most frequently needed options for clusters. + +To edit your cluster, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster you want to configure and click **⋮ > Edit Config**. + +### Editing Clusters in YAML + +For a complete reference of configurable options for K3s clusters in YAML, see the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +To edit your cluster with YAML: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster you want to configure and click **⋮ > Edit as YAML**. +1. Edit the RKE options under the `rkeConfig` directive. + ## Configuration Options in the Rancher UI -:::tip +### Machine Pool Configuration -Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the K3s cluster configuration file in YAML. For the complete reference of configurable options for K3s clusters in YAML, see the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/install-options/) +This subsection covers generic machine pool configurations. For specific infrastructure provider configurations, refer to the following: -::: +- [Azure](../downstream-cluster-configuration/machine-configuration/azure.md) +- [DigitalOcean](../downstream-cluster-configuration/machine-configuration/digitalocean.md) +- [EC2](../downstream-cluster-configuration/machine-configuration/amazon-ec2.md) + +##### Pool Name + +The name of the machine pool. + +##### Machine Count + +The number of machines in the pool. + +##### Roles + +Option to assign etcd, control plane, and worker roles to nodes. + +#### Advanced -### Basics -#### Kubernetes Version +##### Auto Replace + +The amount of time nodes can be unreachable before they are automatically deleted and replaced. + +##### Drain Before Delete + +Enables draining nodes by evicting all pods before the node is deleted. + +##### Kubernetes Node Labels + +Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to nodes to help with organization and object selection. 
+ +For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +##### Taints + +Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, to prevent pods from being scheduled to or executed on the nodes, unless the pods have matching tolerations. + +### Cluster Configuration +#### Basics +##### Kubernetes Version The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). -#### Encrypt Secrets +##### Pod Security Admission Configuration Template + +The default [pod security admission configuration template](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md) for the cluster. + +##### Encrypt Secrets Option to enable or disable secrets encryption. When enabled, secrets will be encrypted using a AES-CBC key. If disabled, any previously secrets will not be readable until encryption is enabled again. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/advanced/#secrets-encryption-config-experimental) for details. -#### Project Network Isolation +##### Project Network Isolation If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. -#### SELinux +##### SELinux Option to enable or disable [SELinux](https://rancher.com/docs/k3s/latest/en/advanced/#selinux-support) support. -#### CoreDNS +##### CoreDNS By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, an alternate DNS provider must be installed yourself. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#coredns) for details.. -#### Klipper Service LB +##### Klipper Service LB Option to enable or disable the [Klipper](https://github.com/rancher/klipper-lb) service load balancer. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) for details. -#### Traefik Ingress +##### Traefik Ingress Option to enable or disable the [Traefik](https://traefik.io/) HTTP reverse proxy and load balancer. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#traefik-ingress-controller). -#### Local Storage +##### Local Storage Option to enable or disable [local storage](https://rancher.com/docs/k3s/latest/en/storage/) on the node(s). -#### Metrics Server +##### Metrics Server Option to enable or disable the [metrics server](https://github.com/kubernetes-incubator/metrics-server). If enabled, ensure port 10250 is opened for inbound TCP traffic. -### Add-On Config +#### Add-On Config Additional Kubernetes manifests, managed as a [Add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/helm/#automatically-deploying-manifests-and-helm-charts) for details. -### Agent Environment Vars +#### Agent Environment Vars Option to set environment variables for [K3s agents](https://rancher.com/docs/k3s/latest/en/architecture/). The environment variables can be set using key value pairs. 
Refer to the [K3 documentation](https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/) for more details. -### etcd +#### etcd -#### Automatic Snapshots +##### Automatic Snapshots Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots. For details, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/backup-restore/#creating-snapshots). -#### Metrics +##### Metrics Option to choose whether to expose etcd metrics to the public or only within the cluster. -### Networking +#### Networking -#### Cluster CIDR +##### Cluster CIDR IPv4/IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). -#### Service CIDR +##### Service CIDR IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). -#### Cluster DNS +##### Cluster DNS IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10). -#### Cluster Domain +##### Cluster Domain Select the domain for the cluster. The default is `cluster.local`. -#### NodePort Service Port Range +##### NodePort Service Port Range Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. -#### Truncate Hostnames +##### Truncate Hostnames Option to truncate hostnames to 15 characters or less. You can only set this field during the initial creation of the cluster. You can't enable or disable the 15 character limit after cluster creation. @@ -110,11 +174,11 @@ This setting only affects machine-provisioned clusters. Since custom clusters se Truncating hostnames in a cluster improves compatibility with Windows-based systems. Although Kubernetes allows hostnames up to 63 characters in length, systems that use NetBIOS restrict hostnames to 15 characters or less. -#### TLS Alternate Names +##### TLS Alternate Names Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. -#### Authorized Cluster Endpoint +##### Authorized Cluster Endpoint Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. @@ -122,34 +186,249 @@ For more detail on how an authorized cluster endpoint works and why it is used, We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint-ace) -### Registries +#### Registries Select the image repository to pull Rancher images from. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/). -### Upgrade Strategy +#### Upgrade Strategy -#### Controle Plane Concurrency +##### Control Plane Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Worker Concurrency +##### Worker Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Drain Nodes (Control Plane) +##### Drain Nodes (Control Plane) Option to remove all pods from the node prior to upgrading. -#### Drain Nodes (Worker Nodes) +##### Drain Nodes (Worker Nodes) Option to remove all pods from the node prior to upgrading. -### Advanced +#### Advanced Option to set kubelet options for different nodes. 
For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). -## Cluster Config File +## Cluster Config File Reference + +Editing clusters in YAML allows you to set configurations that are already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as set Rancher-specific parameters. + +
+ + Example Cluster Config File Snippet + + +```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +spec: + cloudCredentialSecretName: cattle-global-data:cc-fllv6 + clusterAgentDeploymentCustomization: {} + fleetAgentDeploymentCustomization: {} + kubernetesVersion: v1.26.7+k3s1 + localClusterAuthEndpoint: {} + rkeConfig: + chartValues: {} + etcd: + snapshotRetention: 5 + snapshotScheduleCron: 0 */5 * * * + machineGlobalConfig: + disable-apiserver: false + disable-cloud-controller: false + disable-controller-manager: false + disable-etcd: false + disable-kube-proxy: false + disable-network-policy: false + disable-scheduler: false + etcd-expose-metrics: false + kube-apiserver-arg: + - audit-policy-file=/etc/rancher/k3s/user-audit-policy.yaml + - audit-log-path=/etc/rancher/k3s/user-audit.logs + profile: null + secrets-encryption: false + machinePools: + - controlPlaneRole: true + etcdRole: true + machineConfigRef: + kind: Amazonec2Config + name: nc-test-pool1-pwl5h + name: pool1 + quantity: 1 + unhealthyNodeTimeout: 0s + workerRole: true + machineSelectorConfig: + - config: + docker: false + protect-kernel-defaults: false + selinux: false + machineSelectorFiles: + - fileSources: + - configMap: + name: '' + secret: + name: audit-policy + items: + - key: audit-policy + path: /etc/rancher/k3s/user-audit-policy.yaml + machineLabelSelector: + matchLabels: + rke.cattle.io/control-plane-role: 'true' + registries: {} + upgradeStrategy: + controlPlaneConcurrency: '1' + controlPlaneDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 + workerConcurrency: '1' + workerDrainOptions: + deleteEmptyDirData: true + disableEviction: false + enabled: false + force: false + gracePeriod: -1 + ignoreDaemonSets: true + ignoreErrors: false + postDrainHooks: null + preDrainHooks: null + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 120 +``` +
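+
+For example, to upgrade more worker nodes at once and drain each node before it is upgraded, you could change just the `upgradeStrategy` block shown in the snippet above. This is a minimal sketch that reuses the same fields; the values are illustrative:
+
+```yaml
+spec:
+  rkeConfig:
+    upgradeStrategy:
+      # Upgrade up to 20% of worker nodes at the same time.
+      workerConcurrency: '20%'
+      workerDrainOptions:
+        # Remove pods from each worker node before upgrading it.
+        enabled: true
+        gracePeriod: 60
+        timeout: 120
+```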
+ +### chartValues + +Specify the values for the system charts installed by K3s. + +Example: + +```yaml +chartValues: + chart-name: + key: value +``` +### machineGlobalConfig + +Specify K3s configurations. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of k3s](https://docs.k3s.io/cli/server) can be applied here. + +Example: + +```yaml +machineGlobalConfig: + etcd-arg: + - key1=value1 + - key2=value2 +``` + +### machineSelectorConfig + +`machineSelectorConfig` is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. + +Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this field has the same effect as putting the config in the `machineGlobalConfig` section. + +Example: + +```yaml +machineSelectorConfig + - config: + config-key: config-value + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` +### machineSelectorFiles + +:::note + +This feature is available in Rancher v2.7.2 and later. + +::: + +Deliver files to nodes, so that the files can be in place before initiating K3s server or agent processes. +The content of the file is retrieved from either a secret or a configmap. The target nodes are filtered by the `machineLabelSelector`. + +Example : + +```yaml +machineSelectorFiles: + - fileSources: + - secret: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-secret-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 + - fileSources: + - configMap: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-configmap-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` + +The secret or configmap must meet the following requirements: + +1. It must be in the `fleet-default` namespace where the Cluster object exists. +2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2`, which permits the target clusters to use it. -Instead of using the Rancher UI forms to choose Kubernetes options for the cluster, advanced users can create an K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in an K3s installation. +:::tip + +Rancher Dashboard provides an easy-to-use form for creating the secret or configmap. + +::: -To edit an K3s config file directly from the Rancher UI, click **Edit as YAML**. 
+Example:
+
+```yaml
+apiVersion: v1
+data:
+  audit-policy: >-
+    IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE=
+kind: Secret
+metadata:
+  annotations:
+    rke.cattle.io/object-authorized-for-clusters: cluster1
+  name: name1
+  namespace: fleet-default
+```
diff --git a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index c279eccf9786..402d426b02c3 100644
--- a/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/versioned_docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -15,18 +15,26 @@ You can configure the Kubernetes options in one of the two following ways:
- [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster.
- [Cluster Config File](#cluster-config-file-reference): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set many additional [options](https://docs.rke2.io/install/configuration) available for an RKE2 installation.
-## Editing Clusters with a Form in the Rancher UI
+## Editing Clusters in the Rancher UI
+
+The Rancher UI provides two ways to edit a cluster:
+1. With a form.
+1. With YAML.
+
+### Editing Clusters with a Form
+
+The form covers the most frequently needed options for clusters.
To edit your cluster,
-1. In the upper left corner, click **☰ > Cluster Management**.
+1. Click **☰ > Cluster Management**.
1. Go to the cluster you want to configure and click **⋮ > Edit Config**.
-## Editing Clusters with YAML
+### Editing Clusters in YAML
-Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set any of the options available in an RKE2 installation by specifying them in YAML.
+For a complete reference of configurable options for RKE2 clusters in YAML, see the [RKE2 documentation.](https://docs.rke2.io/install/configuration)
-To edit an RKE2 config file directly from the Rancher UI,
+To edit your cluster in YAML:
1. Click **☰ > Cluster Management**.
1. Go to the cluster you want to configure and click **⋮ > Edit as YAML**.
@@ -34,62 +42,55 @@ To edit an RKE2 config file directly from the Rancher UI,
## Configuration Options in the Rancher UI
-:::tip
-
-Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE2 cluster configuration file in YAML. For the complete reference of configurable options for RKE2 Kubernetes clusters in YAML, see the [RKE2 documentation.](https://docs.rke2.io/install/configuration)
-
-:::
-
-## Machine Pool
+### Machine Pool Configuration
-This subsection covers the generic machine pool configurations. For infrastructure provider specific, configurations refer to the following pages:
+This subsection covers generic machine pool configurations.
For specific infrastructure provider configurations, refer to the following: - [Azure](../downstream-cluster-configuration/machine-configuration/azure.md) - [DigitalOcean](../downstream-cluster-configuration/machine-configuration/digitalocean.md) - [EC2](../downstream-cluster-configuration/machine-configuration/amazon-ec2.md) -### Pool Name +##### Pool Name The name of the machine pool. -### Machine Count +##### Machine Count The number of machines in the pool. -### Roles +##### Roles Option to assign etcd, control plane, and worker roles to nodes. -### Advanced +#### Advanced -#### Auto Replace +##### Auto Replace -The duration nodes can be unreachable before they are automatically deleted and replaced. +The amount of time nodes can be unreachable before they are automatically deleted and replaced. -#### Drain Before Delete +##### Drain Before Delete Enables draining nodes by evicting all pods before the node is deleted. -#### Kubernetes Node Labels +##### Kubernetes Node Labels Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to nodes to help with organization and object selection. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) -#### Taints +##### Taints -Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, which can be used to prevent pods from being scheduled to or executed on nodes, unless the pods have matching tolerations. +Add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to nodes, to prevent pods from being scheduled to or executed on the nodes, unless the pods have matching tolerations. -## Cluster Configuration - -### Basics -#### Kubernetes Version +### Cluster Configuration +#### Basics +##### Kubernetes Version The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). -#### Container Network Provider +##### Container Network Provider The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. @@ -110,7 +111,7 @@ Out of the box, Rancher is compatible with the following network providers: For more details on the different networking providers and how to configure them, please view our [RKE2 documentation](https://docs.rke2.io/install/network_options). -##### Dual-stack Networking +###### Dual-stack Networking [Dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) networking is supported for all CNI providers. To configure RKE2 in dual-stack mode, set valid IPv4/IPv6 CIDRs for your [Cluster CIDR](#cluster-cidr) and/or [Service CIDR](#service-cidr). @@ -118,7 +119,7 @@ For more details on the different networking providers and how to configure them When using `cilium` or `multus,cilium` as your container network interface provider, ensure the **Enable IPv6 Support** option is also enabled. -#### Cloud Provider +##### Cloud Provider You can configure a [Kubernetes cloud provider](../../../pages-for-subheaders/set-up-cloud-providers.md). 
If you want to use dynamically provisioned [volumes and storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. @@ -128,89 +129,93 @@ If the cloud provider you want to use is not listed as an option, you will need ::: -#### Default Pod Security Policy +##### Default Pod Security Policy + +The default [pod security policy](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. Please refer to the [RKE2 documentation](https://docs.rke2.io/security/pod_security_policies) on the specifications of each available policy. -Choose the default [pod security policy](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. Please refer to the [RKE2 documentation](https://docs.rke2.io/security/pod_security_policies) on the specifications of each available policy. +##### Pod Security Admission Configuration Template -#### Worker CIS Profile +The default [pod security admission configuration template](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md) for the cluster. + +##### Worker CIS Profile Select a [CIS benchmark](../../../pages-for-subheaders/cis-scan-guides.md) to validate the system configuration against. -#### Project Network Isolation +##### Project Network Isolation If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. Project network isolation is available if you are using any RKE2 network plugin that supports the enforcement of Kubernetes network policies, such as Canal. -#### CoreDNS +##### CoreDNS By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, an alternate DNS provider must be installed yourself. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#coredns) for additional CoreDNS configurations. -#### NGINX Ingress +##### NGINX Ingress If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options. -#### Metrics Server +##### Metrics Server Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). Each cloud provider capable of launching a cluster using RKE2 can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. -### Add-On Config +#### Add-On Config Additional Kubernetes manifests, managed as an [Add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [RKE2 documentation](https://docs.rke2.io/helm#automatically-deploying-manifests-and-helm-charts) for details. 
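+
+As an illustration, add-on manifests can also include a `HelmChartConfig` resource to customize one of the components that RKE2 itself packages. The sketch below assumes you want the bundled `rke2-ingress-nginx` chart to honor `X-Forwarded-*` headers; the value shown is illustrative:
+
+```yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChartConfig
+metadata:
+  name: rke2-ingress-nginx
+  namespace: kube-system
+spec:
+  valuesContent: |-
+    controller:
+      config:
+        # Illustrative value: trust X-Forwarded-* headers from an upstream load balancer.
+        use-forwarded-headers: "true"
+```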
-### Agent Environment Vars +#### Agent Environment Vars Option to set environment variables for [Rancher agents](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/reference/linux_agent_config) for more details. -### etcd +#### etcd -#### Automatic Snapshots +##### Automatic Snapshots Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots. For details, refer to the [RKE2 documentation](https://docs.rke2.io/backup_restore#creating-snapshots). Note that with RKE2, snapshots are stored on each etcd node. This varies from RKE1 which only stores one snapshot per cluster. -#### Metrics +##### Metrics Option to choose whether to expose etcd metrics to the public or only within the cluster. -### Networking +#### Networking -#### Cluster CIDR +##### Cluster CIDR IPv4 and/or IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). -##### Dual-stack Networking +###### Dual-stack Networking To configure [dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. [Additional configuration](#dual-stack-additional-config) is required when using `cilium` or `multus,cilium` as your [container network](#container-network-provider) interface provider. -#### Service CIDR +##### Service CIDR IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). -##### Dual-stack Networking +###### Dual-stack Networking To configure [dual-stack](https://docs.rke2.io/install/network_options#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. [Additional configuration](#dual-stack-additional-config) is required when using `cilium ` or `multus,cilium` as your [container network](#container-network-provider) interface provider. -#### Cluster DNS +##### Cluster DNS IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10). -#### Cluster Domain +##### Cluster Domain Select the domain for the cluster. The default is `cluster.local`. -#### NodePort Service Port Range +##### NodePort Service Port Range Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. -#### Truncate Hostnames +##### Truncate Hostnames Option to truncate hostnames to 15 characters or less. You can only set this field during the initial creation of the cluster. You can't enable or disable the 15 character limit after cluster creation. @@ -218,11 +223,11 @@ This setting only affects machine-provisioned clusters. Since custom clusters se Truncating hostnames in a cluster improves compatibility with Windows-based systems. Although Kubernetes allows hostnames up to 63 characters in length, systems that use NetBIOS restrict hostnames to 15 characters or less. -#### TLS Alternate Names +##### TLS Alternate Names Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. -#### Authorized Cluster Endpoint +##### Authorized Cluster Endpoint Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. 
@@ -232,35 +237,35 @@ For more detail on how an authorized cluster endpoint works and why it is used, We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint-ace) -### Registries +#### Registries Select the image repository to pull Rancher images from. For more details and configuration options, see the [RKE2 documentation](https://docs.rke2.io/install/containerd_registry_configuration). -### Upgrade Strategy +#### Upgrade Strategy -#### Control Plane Concurrency +##### Control Plane Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Worker Concurrency +##### Worker Concurrency Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. -#### Drain Nodes (Control Plane) +##### Drain Nodes (Control Plane) Option to remove all pods from the node prior to upgrading. -#### Drain Nodes (Worker Nodes) +##### Drain Nodes (Worker Nodes) Option to remove all pods from the node prior to upgrading. -### Advanced +#### Advanced Option to set kubelet options for different nodes. For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). ## Cluster Config File Reference -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create a config file. Using a config file allows you to set the [options available](https://docs.rke2.io/install/configuration) in an RKE2 installation, including those already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as Rancher-specific parameters. +Editing clusters in YAML allows you to set the [options available](https://docs.rke2.io/install/configuration) in an RKE2 installation, including those already listed in [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui), as well as set Rancher-specific parameters.
@@ -268,9 +273,11 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad ```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster spec: cloudCredentialSecretName: cattle-global-data:cc-s879v - kubernetesVersion: v1.23.6+rke2r2 + kubernetesVersion: v1.25.12+rke2r1 localClusterAuthEndpoint: {} rkeConfig: chartValues: @@ -283,6 +290,9 @@ spec: disable-kube-proxy: false etcd-expose-metrics: false profile: null + kube-apiserver-arg: + - audit-policy-file=/etc/rancher/rke2/user-audit-policy.yaml + - audit-log-path=/etc/rancher/rke2/user-audit.logs machinePools: - controlPlaneRole: true etcdRole: true @@ -296,6 +306,18 @@ spec: machineSelectorConfig: - config: protect-kernel-defaults: false + machineSelectorFiles: + - fileSources: + - configMap: + name: '' + secret: + name: audit-policy + items: + - key: audit-policy + path: /etc/rancher/rke2/user-audit-policy.yaml + machineLabelSelector: + matchLabels: + rke.cattle.io/control-plane-role: 'true' registries: {} upgradeStrategy: controlPlaneConcurrency: "1" @@ -317,7 +339,7 @@ spec: ### chartValues -Option to specify the values for the system charts installed by RKE2/k3s. +Specify the values for the system charts installed by RKE2. Example: @@ -328,7 +350,7 @@ chartValues: ``` ### machineGlobalConfig -The RKE2/K3s configurations are nested under the `machineGlobalConfig` directive. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of RKE2](https://docs.rke2.io/reference/server_config) can be applied here. +Specify RKE2 configurations. Any configuration change made here will apply to every node. The configuration options available in the [standalone version of RKE2](https://docs.rke2.io/reference/server_config) can be applied here. Example: @@ -341,9 +363,9 @@ machineGlobalConfig: ### machineSelectorConfig -This is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. +`machineSelectorConfig` is the same as [`machineGlobalConfig`](#machineglobalconfig) except that a [label](#kubernetes-node-labels) selector can be specified with the configuration. The configuration will only be applied to nodes that match the provided label selector. -Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this has the same effect as putting the config in the `machineGlobalConfig` section. +Multiple `config` entries are allowed, each specifying their own `machineLabelSelector`. A user can specify `matchExpressions`, `matchLabels`, both, or neither. Omitting the `machineLabelSelector` section of this field has the same effect as putting the config in the `machineGlobalConfig` section. Example: @@ -362,3 +384,81 @@ machineSelectorConfig key1: value1 key2: value2 ``` +### machineSelectorFiles + +:::note + +This feature is available in Rancher v2.7.2 and later. + +::: + +Deliver files to nodes, so that the files can be in place before initiating RKE2 server or agent processes. +The content of the file is retrieved from either a secret or a configmap. The target nodes are filtered by the `machineLabelSelector`. 
+ +Example : + +```yaml +machineSelectorFiles: + - fileSources: + - secret: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-secret-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 + - fileSources: + - configMap: + items: + - key: example-key + path: path-to-put-the-file-on-nodes + permissions: 644 (optional) + hash: base64-encoded-hash-of-the-content (optional) + name: example-configmap-name + machineLabelSelector: + matchExpressions: + - key: example-key + operator: string # Valid operators are In, NotIn, Exists and DoesNotExist. + values: + - example-value1 + - example-value2 + matchLabels: + key1: value1 + key2: value2 +``` + +The secret or configmap must meet the following requirements: + +1. It must be in the `fleet-default` namespace where the Cluster object exists. +2. It must have the annotation `rke.cattle.io/object-authorized-for-clusters: cluster-name1,cluster-name2`, which permits the target clusters to use it. + +:::tip + +Rancher Dashboard provides an easy-to-use form for creating the secret or configmap. + +::: + +Example: + +```yaml +apiVersion: v1 +data: + audit-policy: >- + IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE= +kind: Secret +metadata: + annotations: + rke.cattle.io/object-authorized-for-clusters: cluster1 + name: name1 + namespace: fleet-default +``` diff --git a/versioned_sidebars/version-2.7-sidebars.json b/versioned_sidebars/version-2.7-sidebars.json index 5c7eaa9c1b59..3c5434046822 100644 --- a/versioned_sidebars/version-2.7-sidebars.json +++ b/versioned_sidebars/version-2.7-sidebars.json @@ -763,6 +763,7 @@ "how-to-guides/advanced-user-guides/open-ports-with-firewalld", "how-to-guides/advanced-user-guides/tune-etcd-for-large-installs", "how-to-guides/advanced-user-guides/enable-api-audit-log", + "how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters", "how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer" ] } From c4d8b628841b4261e7124a3d9ebd0414c396e467 Mon Sep 17 00:00:00 2001 From: felipe-colussi Date: Thu, 14 Sep 2023 09:35:18 -0300 Subject: [PATCH 50/54] add cattle-cluster-agent debug logs --- .../other-troubleshooting-tips/logging.md | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index d4d6353442e1..22d121e32aed 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -78,3 +78,23 @@ possible to enable debug logs in `rancher-machine` when provisioning RKE1, RKE2 Just like the `trace` log level above, `rancher-machine` debug logs can contain sensitive information. ::: + + +## Cattle-cluster-agent debug logs + +If you need to troubleshoot the initialization of `cattle-cluster-agent` the log levels can be set during the +initialization of the downstream clusters. + +Wile creating a Cluster under `Cluster Configuration -> Agent Environment Vars` some variables can be set to define +the log level. 
+ + +For trace-level `CATTLE_TRACE` or `RANCHER_TRACE` must be set to `true` + +For debug-level `CATTLE_DEBUG` or `RANCHER_DEUBG` must be set to `true` + +:::caution + +Just like the `trace` log level above, `cattle-cluster-agent` debug logs can contain sensitive information. + +::: From 342d8feb5adf761d62623bd180be97a2321bfc25 Mon Sep 17 00:00:00 2001 From: Felipe Colussi-oliva Date: Mon, 18 Sep 2023 13:31:36 -0300 Subject: [PATCH 51/54] Update docs/troubleshooting/other-troubleshooting-tips/logging.md Co-authored-by: Marty Hernandez Avedon --- docs/troubleshooting/other-troubleshooting-tips/logging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index 22d121e32aed..df163942adfa 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -95,6 +95,6 @@ For debug-level `CATTLE_DEBUG` or `RANCHER_DEUBG` must be set to `true` :::caution -Just like the `trace` log level above, `cattle-cluster-agent` debug logs can contain sensitive information. +The `cattle-cluster-agent` debug logs may contain sensitive information. ::: From 7cd5e1cad868e84b99e101dbfeecee71a6689c4b Mon Sep 17 00:00:00 2001 From: Felipe Colussi-oliva Date: Mon, 18 Sep 2023 13:31:49 -0300 Subject: [PATCH 52/54] Update docs/troubleshooting/other-troubleshooting-tips/logging.md Co-authored-by: Marty Hernandez Avedon --- docs/troubleshooting/other-troubleshooting-tips/logging.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index df163942adfa..ebc312b3fed4 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -85,8 +85,7 @@ Just like the `trace` log level above, `rancher-machine` debug logs can contain If you need to troubleshoot the initialization of `cattle-cluster-agent` the log levels can be set during the initialization of the downstream clusters. -Wile creating a Cluster under `Cluster Configuration -> Agent Environment Vars` some variables can be set to define -the log level. +When you create a cluster under **Cluster Configuration > Agent Environment Vars** you can set variables to define the log level. For trace-level `CATTLE_TRACE` or `RANCHER_TRACE` must be set to `true` From c44c0b363cb6f9c970a931a7d016382d7c3c78ae Mon Sep 17 00:00:00 2001 From: Felipe Colussi-oliva Date: Mon, 18 Sep 2023 13:31:56 -0300 Subject: [PATCH 53/54] Update docs/troubleshooting/other-troubleshooting-tips/logging.md Co-authored-by: Marty Hernandez Avedon --- docs/troubleshooting/other-troubleshooting-tips/logging.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index ebc312b3fed4..c677a2c20705 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -82,8 +82,7 @@ Just like the `trace` log level above, `rancher-machine` debug logs can contain ## Cattle-cluster-agent debug logs -If you need to troubleshoot the initialization of `cattle-cluster-agent` the log levels can be set during the -initialization of the downstream clusters. 
+The `cattle-cluster-agent` log levels can be set when you initialize the downstream clusters. When you create a cluster under **Cluster Configuration > Agent Environment Vars** you can set variables to define the log level. From 1a9a64ff1eeb0481d03ad03816ecdff2c1340a60 Mon Sep 17 00:00:00 2001 From: Felipe Colussi-oliva Date: Mon, 18 Sep 2023 13:32:12 -0300 Subject: [PATCH 54/54] Update docs/troubleshooting/other-troubleshooting-tips/logging.md Co-authored-by: Marty Hernandez Avedon --- docs/troubleshooting/other-troubleshooting-tips/logging.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/troubleshooting/other-troubleshooting-tips/logging.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md index c677a2c20705..ca7b7783b579 100644 --- a/docs/troubleshooting/other-troubleshooting-tips/logging.md +++ b/docs/troubleshooting/other-troubleshooting-tips/logging.md @@ -87,9 +87,9 @@ The `cattle-cluster-agent` log levels can be set when you initialize the downstr When you create a cluster under **Cluster Configuration > Agent Environment Vars** you can set variables to define the log level. -For trace-level `CATTLE_TRACE` or `RANCHER_TRACE` must be set to `true` +- Trace-level logging: Set `CATTLE_TRACE` or `RANCHER_TRACE` to `true` -For debug-level `CATTLE_DEBUG` or `RANCHER_DEUBG` must be set to `true` +- Debug-level logging: Set `CATTLE_DEBUG` or `RANCHER_DEBUG` to `true` :::caution
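If you prefer editing the cluster YAML over the UI form, the same variables can likely be set through the agent environment variables in the cluster spec. This is a sketch under the assumption that the field is named `agentEnvVars`; it is not shown in the change above.

```yaml
apiVersion: provisioning.cattle.io/v1
kind: Cluster
spec:
  # Environment variables passed to the cattle-cluster-agent (field name assumed).
  agentEnvVars:
    - name: CATTLE_DEBUG
      value: "true"
    # For more verbose output, trace-level logging could be enabled instead:
    # - name: CATTLE_TRACE
    #   value: "true"
```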