diff --git a/Makefile b/Makefile index a775e60d..4dd90c8d 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ JSONNETFMT_ARGS ?= --in-place --pad-arrays JSONNET_IMAGE ?= docker.io/bitnami/jsonnet:latest JSONNET_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) --entrypoint=jsonnetfmt $(JSONNET_IMAGE) -YAML_FILES ?= $(shell find . -type f -not -path './vendor/*' \( -name '*.yaml' -or -name '*.yml' \)) +YAML_FILES ?= $(shell find . -type f -not -regex './\(helmcharts\|manifests\|vendor\)/.*' \( -name '*.yaml' -or -name '*.yml' \)) YAMLLINT_ARGS ?= --no-warnings YAMLLINT_CONFIG ?= .yamllint.yml YAMLLINT_IMAGE ?= docker.io/cytopia/yamllint:latest @@ -22,7 +22,8 @@ YAMLLINT_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(YAMLLINT_IMAGE) VALE_CMD ?= $(DOCKER_CMD) $(DOCKER_ARGS) --volume "$${PWD}"/docs/modules:/pages vshn/vale:2.1.1 VALE_ARGS ?= --minAlertLevel=error --config=/pages/ROOT/pages/.vale.ini /pages -ANTORA_PREVIEW_CMD ?= $(DOCKER_CMD) run --rm --publish 2020:2020 --volume "${PWD}":/antora vshn/antora-preview:2.3.3 --style=syn --antora=docs +ANTORA_PREVIEW_CMD ?= $(DOCKER_CMD) run --rm --publish 35729:35729 --publish 2020:2020 --volume "${PWD}":/preview/antora vshn/antora-preview:2.3.6 --style=syn --antora=docs + .PHONY: all all: lint open diff --git a/README.md b/README.md index f00bfbb1..dbd9d0d6 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Commodore Component: Rook Ceph -This is a [Commodore][commodore] Component for Rook Ceph. +This is a [Commodore][commodore] Component to manage the [Rook.io](https://rook.io) rook-ceph operator, Ceph cluster and CSI drivers. This repository is part of Project Syn. For documentation on Project Syn and this component, see https://syn.tools. diff --git a/class/defaults.yml b/class/defaults.yml index 1157e898..5ceb2b9c 100644 --- a/class/defaults.yml +++ b/class/defaults.yml @@ -1,4 +1,177 @@ parameters: rook_ceph: =_metadata: {} - namespace: syn-rook-ceph + namespace: syn-rook-ceph-operator + + ceph_cluster: + name: cluster + namespace: syn-rook-ceph-${rook_ceph:ceph_cluster:name} + node_count: 3 + block_storage_class: localblock + # Configure volume size here, if block storage PVs are provisioned + # dynamically + block_volume_size: 1 + # set to true if backing storage is SSD + tune_fast_device_class: false + # Rendered into rook-config-override CM + config_override: + osd: + # We explicitly set bluefs_buffered_io to false to get good write + # bandwidth on Exoscale -> TODO: needs to be checked per + # infrastructure, maybe move the config to cloud/exoscale/params.yml + bluefs_buffered_io: false + + # Whether to setup RBD CSI driver and pools + rbd_enabled: true + # Whether to setup CephFS CSI driver and pools + cephfs_enabled: false + + storage_pools: + rbd: + storagepool: + config: + failureDomain: host + replicated: + size: 3 + requireSafeReplicaSize: true + storage_class_config: + parameters: + csi.storage.k8s.io/fstype: ext4 + allowVolumeExpansion: true + cephfs: + fspool: + data_pools: + pool0: + failureDomain: host + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + compression_mode: none + config: + metadataPool: + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + compression_mode: none + # dataPools rendered from data_pools in Jsonnet + preserveFilesystemOnDelete: true + metadataServer: + activeCount: 1 + activeStandby: true + # metadata server placement done in Jsonnet but can be + # extended here + mirroring: + enabled: false + storage_class_config: + allowVolumeExpansion: true + + + node_selector: + 
node-role.kubernetes.io/storage: '' + + tolerations: + - key: storagenode + operator: Exists + + images: + rook: + registry: docker.io + image: rook/ceph + tag: v1.6.6 + ceph: + registry: docker.io + image: ceph/ceph + tag: v16.2.4 + cephcsi: + registry: quay.io + image: cephcsi/cephcsi + tag: v3.3.1 + + charts: + rook-ceph: v1.6.6 + + operator_helm_values: + image: + repository: ${rook_ceph:images:rook:registry}/${rook_ceph:images:rook:image} + tag: ${rook_ceph:images:rook:tag} + nodeSelector: ${rook_ceph:node_selector} + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 750m + memory: 512Mi + tolerations: ${rook_ceph:tolerations} + csi: + enableCSIHostNetwork: false + cephcsi: + image: ${rook_ceph:images:cephcsi:registry}/${rook_ceph:images:cephcsi:image}:${rook_ceph:images:cephcsi:tag} + + toolbox: + enabled: true + image: ${rook_ceph:images:rook} + + cephClusterSpec: + cephVersion: + image: ${rook_ceph:images:ceph:registry}/${rook_ceph:images:ceph:image}:${rook_ceph:images:ceph:tag} + allowUnsupported: false + dataDirHostPath: /var/lib/rook + monitoring: + enabled: true + rulesNamespace: ${rook_ceph:ceph_cluster:namespace} + mon: + count: 3 + allowMultiplePerNode: false + network: + provider: host + placement: + all: + # nodeAffinity is injected in Jsonnet, + # taking placement labels from ${rook_ceph:node_selector} + tolerations: ${rook_ceph:tolerations} + resources: + mgr: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: "1" + memory: 2Gi + mon: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: "1" + memory: 2Gi + osd: + limits: + cpu: "6" + memory: 5Gi + requests: + cpu: "4" + memory: 5Gi + storage: + useAllNodes: false + useAllDevices: false + storageClassDeviceSets: + - name: ${rook_ceph:ceph_cluster:name} + count: ${rook_ceph:ceph_cluster:node_count} + volumeClaimTemplates: + - spec: + storageClassName: ${rook_ceph:ceph_cluster:block_storage_class} + volumeMode: Block + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${rook_ceph:ceph_cluster:block_volume_size} + encrypted: true + tuneFastDeviceClass: ${rook_ceph:ceph_cluster:tune_fast_device_class} + + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 diff --git a/class/rook-ceph.yml b/class/rook-ceph.yml index 76152dc3..713c31dd 100644 --- a/class/rook-ceph.yml +++ b/class/rook-ceph.yml @@ -1,12 +1,44 @@ parameters: kapitan: + dependencies: + - type: helm + source: https://charts.rook.io/release + chart_name: rook-ceph + version: ${rook_ceph:charts:rook-ceph} + output_path: dependencies/rook-ceph/helmcharts/rook-ceph/${rook_ceph:charts:rook-ceph} + - type: https + source: https://github.com/rook/rook/raw/${rook_ceph:images:rook:tag}/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml + output_path: dependencies/rook-ceph/manifests/${rook_ceph:images:rook:tag}/rbd-storageclass.yaml + - type: https + source: https://github.com/rook/rook/raw/${rook_ceph:images:rook:tag}/cluster/examples/kubernetes/ceph/csi/rbd/snapshotclass.yaml + output_path: dependencies/rook-ceph/manifests/${rook_ceph:images:rook:tag}/rbd-snapshotclass.yaml + - type: https + source: https://github.com/rook/rook/raw/${rook_ceph:images:rook:tag}/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass.yaml + output_path: dependencies/rook-ceph/manifests/${rook_ceph:images:rook:tag}/cephfs-storageclass.yaml + - type: https + source: https://github.com/rook/rook/raw/${rook_ceph:images:rook:tag}/cluster/examples/kubernetes/ceph/csi/cephfs/snapshotclass.yaml + output_path: 
dependencies/rook-ceph/manifests/${rook_ceph:images:rook:tag}/cephfs-snapshotclass.yaml compile: - input_paths: - rook-ceph/component/app.jsonnet input_type: jsonnet output_path: apps/ + - input_paths: + - rook-ceph/helmcharts/rook-ceph/${rook_ceph:charts:rook-ceph} + input_type: helm + output_type: yaml + output_path: rook-ceph/01_rook_ceph_helmchart + helm_values: ${rook_ceph:operator_helm_values} + helm_params: + release_name: syn-rook-ceph + namespace: ${rook_ceph:namespace} - input_paths: - rook-ceph/component/main.jsonnet input_type: jsonnet output_path: rook-ceph/ - + commodore: + postprocess: + filters: + - type: jsonnet + path: rook-ceph/01_rook_ceph_helmchart/rook-ceph/templates/ + filter: postprocess/patch_operator_deployment.jsonnet diff --git a/component/cephcluster.libsonnet b/component/cephcluster.libsonnet new file mode 100644 index 00000000..ca18c871 --- /dev/null +++ b/component/cephcluster.libsonnet @@ -0,0 +1,334 @@ +local com = import 'lib/commodore.libjsonnet'; +local kap = import 'lib/kapitan.libjsonnet'; +local kube = import 'lib/kube.libjsonnet'; +local inv = kap.inventory(); +local params = inv.parameters.rook_ceph; + +local on_openshift = inv.parameters.facts.distribution == 'openshift4'; + +local serviceaccounts = + if params.ceph_cluster.namespace != params.namespace then { + [std.strReplace(suffix, '-', '_')]: kube.ServiceAccount('rook-ceph-%s' % suffix) { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + } + for suffix in [ 'osd', 'mgr', 'cmd-reporter' ] + } + else {}; + +local roles = + ( + // For OCP4 we need the metrics discovery role + if on_openshift then + { + metrics: kube.Role('rook-ceph-metrics') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + rules: [ + { + apiGroups: [ '' ], + resources: [ 'services', 'endpoints', 'pods' ], + verbs: [ 'get', 'list', 'watch' ], + }, + ], + }, + } + else {} + ) + + // the following roles are created by the operator helm chart in the + // operator namespace. However, if we create the Ceph cluster in a different + // namespace, we need to create them in that namespace instead. 
+ if params.ceph_cluster.namespace != params.namespace then { + osd: kube.Role('rook-ceph-osd') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + rules: [ + { + apiGroups: [ '' ], + resources: [ 'configmaps' ], + verbs: [ 'get', 'list', 'watch', 'create', 'update', 'delete' ], + }, + { + apiGroups: [ 'ceph.rook.io' ], + resources: [ 'cephclusters', 'cephclusters/finalizers' ], + verbs: [ 'get', 'list', 'create', 'update', 'delete' ], + }, + ], + }, + mgr: kube.Role('rook-ceph-mgr') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + rules: [ + { + apiGroups: [ '' ], + resources: [ 'pods', 'services', 'pods/log' ], + verbs: [ 'get', 'list', 'watch', 'create', 'update', 'delete' ], + }, + { + apiGroups: [ 'batch' ], + resources: [ 'jobs' ], + verbs: [ 'get', 'list', 'watch', 'create', 'update', 'delete' ], + }, + { + apiGroups: [ 'ceph.rook.io' ], + resources: [ '*' ], + verbs: [ '*' ], + }, + ], + }, + cmd_reporter: kube.Role('rook-ceph-cmd-reporter') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + rules: [ + { + apiGroups: [ '' ], + resources: [ 'pods', 'configmaps' ], + verbs: [ 'get', 'list', 'watch', 'create', 'update', 'delete' ], + }, + ], + }, + monitoring: kube.Role('rook-ceph-monitoring') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + rules: [ + { + apiGroups: [ 'monitoring.coreos.com' ], + resources: [ 'servicemonitors', 'prometheusrules' ], + verbs: [ 'get', 'list', 'watch', 'create', 'update', 'delete' ], + }, + ], + }, + } + else {}; + +local rolebindings = + ( + // For OCP4, we need the metrics discovery rolebinding + if on_openshift then + [ + kube.RoleBinding('rook-ceph-metrics') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + roleRef_:: roles.metrics, + subjects: [ { + kind: 'ServiceAccount', + name: 'prometheus-k8s', + namespace: 'openshift-monitoring', + } ], + }, + ] + else [] + ) + + if params.ceph_cluster.namespace != params.namespace then [ + // allow the operator to create resource in the cluster's namespace + kube.RoleBinding('rook-ceph-cluster-mgmt') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + roleRef: { + apiGroup: 'rbac.authorization.k8s.io', + kind: 'ClusterRole', + name: 'rook-ceph-cluster-mgmt', + }, + subjects: [ { + kind: 'ServiceAccount', + name: 'rook-ceph-system', + namespace: params.namespace, + } ], + }, + // allow the osd pods in the namespace to work with configmaps + kube.RoleBinding('rook-ceph-osd') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + roleRef_:: roles.osd, + subjects_:: [ serviceaccounts.osd ], + }, + // Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules + kube.RoleBinding('rook-ceph-mgr') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + roleRef_:: roles.mgr, + subjects_:: [ serviceaccounts.mgr ], + }, + // Allow the ceph mgr to access the rook system resources necessary for the mgr modules + kube.RoleBinding('rook-ceph-mgr-system-%s' % params.ceph_cluster.name) { + metadata+: { + namespace: params.namespace, + }, + roleRef: { + apiGroup: 'rbac.authorization.k8s.io', + kind: 'ClusterRole', + name: 'rook-ceph-mgr-system', + }, + subjects_:: [ serviceaccounts.mgr ], + }, + kube.RoleBinding('rook-ceph-cmd-reporter') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + roleRef_:: roles.cmd_reporter, + subjects_:: [ serviceaccounts.cmd_reporter ], + }, + // monitoring + kube.RoleBinding('rook-ceph-monitoring') { + metadata+: { + 
namespace: params.ceph_cluster.namespace, + }, + roleRef_:: roles.monitoring, + subjects: [ { + kind: 'ServiceAccount', + name: 'rook-ceph-system', + namespace: params.namespace, + } ], + }, + ] + else []; + +local clusterrolebindings = + if params.ceph_cluster.namespace != params.namespace then [ + kube.ClusterRoleBinding('rook-ceph-mgr-cluster-%s' % params.ceph_cluster.name) { + roleRef: { + apiGroup: 'rbac.authorization.k8s.io', + kind: 'ClusterRole', + name: 'rook-ceph-mgr-cluster', + }, + subjects_:: [ serviceaccounts.mgr ], + }, + kube.ClusterRoleBinding('rook-ceph-osd-%s' % params.ceph_cluster.name) { + roleRef: { + apiGroup: 'rbac.authorization.k8s.io', + kind: 'ClusterRole', + name: 'rook-ceph-osd', + }, + subjects_:: [ serviceaccounts.osd ], + }, + ] + else []; + + +local objValues(o) = [ o[it] for it in std.objectFields(o) ]; + +local rbac = + objValues(serviceaccounts) + + objValues(roles) + + rolebindings + + clusterrolebindings; + +local nodeAffinity = { + nodeAffinity+: { + requiredDuringSchedulingIgnoredDuringExecution+: { + nodeSelectorTerms+: [ { + matchExpressions+: [ + { + key: label, + operator: 'Exists', + } + for label in std.objectFields(params.node_selector) + ], + } ], + }, + }, +}; + +local cephcluster = + kube._Object('ceph.rook.io/v1', 'CephCluster', params.ceph_cluster.name) + { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + spec: + { + placement: { + all: nodeAffinity, + }, + disruptionManagement: { + manageMachineDisruptionBudgets: on_openshift, + machineDisruptionBudgetNamespace: 'openshift-machine-api', + }, + } + + com.makeMergeable(params.cephClusterSpec), + }; + +local configmap = + kube.ConfigMap('rook-config-override') { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + data: { + config: std.manifestIni({ + sections: params.ceph_cluster.config_override, + }), + }, + }; + +local toolbox = + kube.Deployment('rook-ceph-tools') + { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + spec+: { + template+: { + spec+: { + containers_:: { + rook_ceph_tools: kube.Container('rook-ceph-tools') { + image: '%(registry)s/%(image)s:%(tag)s' % params.toolbox.image, + command: [ '/tini' ], + args: [ '-g', '--', '/usr/local/bin/toolbox.sh' ], + imagePullPolicy: 'IfNotPresent', + env_:: { + ROOK_CEPH_USERNAME: { + secretKeyRef: { + name: 'rook-ceph-mon', + key: 'ceph-username', + }, + }, + ROOK_CEPH_SECRET: { + secretKeyRef: { + name: 'rook-ceph-mon', + key: 'ceph-secret', + }, + }, + }, + volumeMounts_:: { + ceph_config: { mountPath: '/etc/ceph' }, + mon_endpoint_volume: { mountPath: '/etc/rook' }, + }, + }, + }, + volumes_:: { + mon_endpoint_volume: { + configMap: { + name: 'rook-ceph-mon-endpoints', + items: [ { + key: 'data', + path: 'mon-endpoints', + } ], + }, + }, + ceph_config: { + emptyDir: {}, + }, + }, + tolerations: params.tolerations, + affinity: nodeAffinity, + }, + }, + }, + }; + +{ + rbac: rbac, + configmap: configmap, + cluster: cephcluster, + toolbox: toolbox, +} diff --git a/component/cephfs.libsonnet b/component/cephfs.libsonnet new file mode 100644 index 00000000..9f5d0495 --- /dev/null +++ b/component/cephfs.libsonnet @@ -0,0 +1,103 @@ +local com = import 'lib/commodore.libjsonnet'; +local kap = import 'lib/kapitan.libjsonnet'; +local kube = import 'lib/kube.libjsonnet'; +local sc = import 'lib/storageclass.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.rook_ceph; + +local sp = import 'storagepool.libsonnet'; + +local cephfs_params = 
params.ceph_cluster.storage_pools.cephfs; + +local metadataServerPlacement = { + spec+: { + metadataServer+: { + placement+: { + podAntiAffinity+: { + requiredDuringSchedulingIgnoredDuringExecution+: [ { + labelSelector: { + matchExpressions: [ { + key: 'app', + operator: 'In', + values: [ 'rook-ceph-mds' ], + } ], + }, + topologyKey: 'kubernetes.io/hostname', + } ], + preferredDuringSchedulingIgnoredDuringExecution+: [ { + weight: 100, + podAffinityTerm: { + labelSelector: { + matchExpressions: [ { + key: 'app', + operator: 'In', + values: [ 'rook-ceph-mds' ], + } ], + }, + topologyKey: 'topology.kubernetes.io/zone', + }, + } ], + }, + }, + }, + }, +}; + +// Users are responsible for providing working cephfs configs, we don't +// verify them here +local cephfs_pools = [ + kube._Object('ceph.rook.io/v1', 'CephFilesystem', pool) + { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + } + + metadataServerPlacement + + { + spec+: + com.makeMergeable( + com.getValueOrDefault( + cephfs_params[pool], + 'config', + {} + ) + ), + } + + { + spec+: { + // overwrite datapools setup by user -> documentation clearly states + // to configure dataPools in params.data_pools. + dataPools: [ + cephfs_params[pool].data_pools[pn] + for pn in std.objectFields(cephfs_params[pool].data_pools) + ], + }, + } + for pool in std.objectFields(cephfs_params) +]; + +// TODO: figure out if/how we want to create storageclasses for additional +// pools configured on CephFS instances. +local cephfs_storageclasses = [ + local subpool = '%s-data0' % [ fs ]; + sp.configure_storageclass('cephfs', fs, subpool) { + parameters+: { + fsName: fs, + }, + } + for fs in std.objectFields(cephfs_params) +]; +local cephfs_snapclass = [ + sp.configure_snapshotclass('cephfs'), +]; + +if params.ceph_cluster.cephfs_enabled then { + storagepools: cephfs_pools, + storageclasses: cephfs_storageclasses, + snapshotclass: cephfs_snapclass, +} else { + storagepools: [], + storageclasses: [], + snapshotclass: [], +} diff --git a/component/main.jsonnet b/component/main.jsonnet index 3c4d88dc..84ec7a21 100644 --- a/component/main.jsonnet +++ b/component/main.jsonnet @@ -1,10 +1,60 @@ -// main template for rook-ceph local kap = import 'lib/kapitan.libjsonnet'; local kube = import 'lib/kube.libjsonnet'; local inv = kap.inventory(); -// The hiera parameters for the component local params = inv.parameters.rook_ceph; -// Define outputs below +local on_openshift = + inv.parameters.facts.distribution == 'openshift4'; + +local cephcluster = import 'cephcluster.libsonnet'; + +local ns_config = + if on_openshift then { + metadata+: { + annotations+: { + // set node selector to allow pods to be scheduled on all nodes -> CSI + // plugins need to run everywhere + 'openshift.io/node-selector': '', + }, + labels+: { + // Configure the namespaces so that the OCP4 cluster-monitoring + // Prometheus can find the servicemonitors and rules. 
+ 'openshift.io/cluster-monitoring': 'true', + }, + }, + } + else + {}; + +local ocp_config = import 'openshift.libsonnet'; + +local rbd_config = import 'rbd.libsonnet'; +local cephfs_config = import 'cephfs.libsonnet'; + +local namespaces = + [ + kube.Namespace(params.namespace) + ns_config, + ] + + if params.ceph_cluster.namespace != params.namespace then + [ + kube.Namespace(params.ceph_cluster.namespace) + ns_config, + ] + else []; + { + '00_namespaces': namespaces, + [if on_openshift then '02_openshift_sccs']: ocp_config.sccs, + '10_cephcluster_rbac': cephcluster.rbac, + '10_cephcluster_configoverride': cephcluster.configmap, + '10_cephcluster_cluster': cephcluster.cluster, + [if params.toolbox.enabled then '10_cephcluster_toolbox']: cephcluster.toolbox, + '20_storagepools': + rbd_config.storagepools + + cephfs_config.storagepools, + '30_storageclasses': + rbd_config.storageclasses + + cephfs_config.storageclasses, + '30_snapshotclasses': + rbd_config.snapshotclass + + cephfs_config.snapshotclass, } diff --git a/component/openshift.libsonnet b/component/openshift.libsonnet new file mode 100644 index 00000000..9c5c9073 --- /dev/null +++ b/component/openshift.libsonnet @@ -0,0 +1,109 @@ +local kap = import 'lib/kapitan.libjsonnet'; +local kube = import 'lib/kube.libjsonnet'; +local inv = kap.inventory(); +local params = inv.parameters.rook_ceph; + +local sccServiceAccountList(accts, namespace) = + [ + 'system:serviceaccount:%s:%s' % [ namespace, sa ] + for sa in accts + ]; + +local sccs = [ + kube._Object( + 'security.openshift.io/v1', + 'SecurityContextConstraints', + 'rook-ceph' + ) + { + allowPrivilegedContainer: true, + allowHostNetwork: true, + allowHostDirVolumePlugin: true, + allowedCapabilities: [], + allowHostPorts: true, + allowHostPID: true, + allowHostIPC: true, + readOnlyRootFilesystem: false, + requiredDropCapabilities: [], + defaultAddCapabilities: [], + runAsUser: { + type: 'RunAsAny', + }, + seLinuxContext: { + type: 'MustRunAs', + }, + fsGroup: { + type: 'MustRunAs', + }, + supplementalGroups: { + type: 'RunAsAny', + }, + allowedFlexVolumes: [ + { driver: 'ceph.rook.io/rook' }, + { driver: 'ceph.rook.io/rook-ceph' }, + ], + volumes: [ + 'configMap', + 'downwardAPI', + 'emptyDir', + 'flexVolume', + 'hostPath', + 'persistentVolumeClaim', + 'projected', + 'secret', + ], + users: + [ + 'system:serviceaccount:%s:rook-ceph-system' % params.namespace, + ] + + sccServiceAccountList([ + 'default', + 'rook-ceph-mgr', + 'rook-ceph-osd', + ], params.ceph_cluster.namespace), + }, + kube._Object( + 'security.openshift.io/v1', + 'SecurityContextConstraints', + 'rook-ceph-csi' + ) + { + allowPrivilegedContainer: true, + allowHostNetwork: true, + allowHostDirVolumePlugin: true, + allowedCapabilities: [ '*' ], + allowHostPorts: true, + allowHostPID: true, + allowHostIPC: true, + readOnlyRootFilesystem: false, + requiredDropCapabilities: [], + defaultAddCapabilities: [], + runAsUser: { + type: 'RunAsAny', + }, + seLinuxContext: { + type: 'RunAsAny', + }, + fsGroup: { + type: 'RunAsAny', + }, + supplementalGroups: { + type: 'RunAsAny', + }, + allowedFlexVolumes: [ + { driver: 'ceph.rook.io/rook' }, + { driver: 'ceph.rook.io/rook-ceph' }, + ], + volumes: [ '*' ], + users: sccServiceAccountList([ + 'rook-csi-rbd-plugin-sa', + 'rook-csi-rbd-provisioner-sa', + 'rook-csi-cephfs-plugin-sa', + 'rook-csi-cephfs-provisioner-sa', + ], params.namespace), + }, +]; + +{ + sccs: sccs, +} diff --git a/component/rbd.libsonnet b/component/rbd.libsonnet new file mode 100644 index 00000000..6acea2ef --- 
/dev/null +++ b/component/rbd.libsonnet @@ -0,0 +1,46 @@ +local com = import 'lib/commodore.libjsonnet'; +local kap = import 'lib/kapitan.libjsonnet'; +local kube = import 'lib/kube.libjsonnet'; +local sc = import 'lib/storageclass.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.rook_ceph; + +local sp = import 'storagepool.libsonnet'; + +local rbd_params = params.ceph_cluster.storage_pools.rbd; + +local rbd_blockpools = [ + kube._Object('ceph.rook.io/v1', 'CephBlockPool', name) { + metadata+: { + namespace: params.ceph_cluster.namespace, + }, + spec: com.makeMergeable( + com.getValueOrDefault( + params.ceph_cluster.storage_pools.rbd[name], + 'config', + {} + ) + ), + } + for name in std.objectFields(rbd_params) +]; + +local rbd_storageclasses = [ + sp.configure_storageclass('rbd', name) + for name in std.objectFields(rbd_params) +]; + +local rbd_snapclass = [ + sp.configure_snapshotclass('rbd'), +]; + +if params.ceph_cluster.rbd_enabled then { + storagepools: rbd_blockpools, + storageclasses: rbd_storageclasses, + snapshotclass: rbd_snapclass, +} else { + storagepools: [], + storageclasses: [], + snapshotclass: [], +} diff --git a/component/storagepool.libsonnet b/component/storagepool.libsonnet new file mode 100644 index 00000000..60e61965 --- /dev/null +++ b/component/storagepool.libsonnet @@ -0,0 +1,94 @@ +local com = import 'lib/commodore.libjsonnet'; +local kap = import 'lib/kapitan.libjsonnet'; +local sc = import 'lib/storageclass.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.rook_ceph; + +local load_manifest(name) = + std.parseJson(kap.yaml_load_stream( + 'rook-ceph/manifests/%s/%s.yaml' % [ params.images.rook.tag, name ] + )); + +local load_storageclass(type) = + // load yaml + local manifest = load_manifest('%s-storageclass' % type); + assert std.length(manifest) >= 1; + // find storageclass manifest in loaded yaml + local sc = std.prune([ + if it.kind == 'StorageClass' then it + for it in manifest + ]); + assert std.length(sc) == 1; + // return storageclass + sc[0]; + +local get_sc_config(type, pool) = + assert std.objectHas(params.ceph_cluster.storage_pools, type); + assert std.objectHas( + params.ceph_cluster.storage_pools[type], pool + ); + com.makeMergeable(com.getValueOrDefault( + params.ceph_cluster.storage_pools[type][pool], + 'storage_class_config', + {} + )); + +// subpool used for CephFS +local configure_sc(type, pool, subpool=null) = + local obj = load_storageclass(type); + local sc_config = get_sc_config(type, pool); + com.makeMergeable(obj) + + sc.storageClass( + '%s-%s-%s' % [ type, pool, params.ceph_cluster.name ] + ) + + sc_config + + { + provisioner: '%s.%s.csi.ceph.com' % [ params.namespace, type ], + parameters+: { + clusterID: params.ceph_cluster.namespace, + pool: + if subpool != null then + subpool + else + pool, + 'csi.storage.k8s.io/provisioner-secret-namespace': + params.ceph_cluster.namespace, + 'csi.storage.k8s.io/controller-expand-secret-namespace': + params.ceph_cluster.namespace, + 'csi.storage.k8s.io/node-stage-secret-namespace': + params.ceph_cluster.namespace, + }, + }; + + +local load_snapclass(type) = + local manifest = load_manifest('%s-snapshotclass' % type); + assert std.length(manifest) == 1; + manifest[0]; + +local configure_snapclass(type) = + local name = if type == 'rbd' then + 'rook-ceph-rbd-%s' % params.ceph_cluster.name + else if type == 'cephfs' then + 'rook-cephfs-%s' % params.ceph_cluster.name + else + error "unknown snapshotclass type '%s'" % type; + + local obj = 
load_snapclass(type); + obj { + metadata+: { + name: name, + }, + driver: '%s.%s.csi.ceph.com' % [ params.namespace, type ], + parameters+: { + clusterID: params.ceph_cluster.namespace, + 'csi.storage.k8s.io/snapshotter-secret-namespace': params.ceph_cluster.namespace, + }, + }; + + +{ + configure_storageclass: configure_sc, + configure_snapshotclass: configure_snapclass, +} diff --git a/docs/modules/ROOT/pages/how-tos/.gitkeep b/docs/modules/ROOT/pages/how-tos/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/modules/ROOT/pages/how-tos/configure-ceph.adoc b/docs/modules/ROOT/pages/how-tos/configure-ceph.adoc new file mode 100644 index 00000000..56ffb03d --- /dev/null +++ b/docs/modules/ROOT/pages/how-tos/configure-ceph.adoc @@ -0,0 +1,73 @@ += Configuring and tuning Ceph + +This how-to gives some configuration snippets to configure and tune the Ceph cluster. + +See https://rook.io/docs/rook/v1.6/ceph-cluster-crd.html[the Rook.io `CephCluster`] documentation for Ceph configuration options which are exposed by the Rook-Ceph operator. +See https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/[Ceph documentation] for the upstream Ceph configuration documentation. + +== Configure Ceph's backing storage + +To configure the component for an infrastructure which provides backing storage as storageclass `localblock-storage`, simply provide the following config. + +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + block_storage_class: localblock-storage +---- + +=== Tune backing storage for SSDs + +If the backing storage provided for Ceph is itself backed by SSDs (or better), you can tune Ceph for "fast" devices with the following config. + +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + tune_fast_device_class: true +---- + +== Configure target size of RBD block pool + +To tell the Ceph cluster that the default RBD block pool, which is named `storagepool`, is expected to take up 80% of the Ceph cluster's capacity, the following config can be provided. + +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + storage_pools: + rbd: + storagepool: + config: + parameters: + target_size_ratio: "0.8" +---- + +== Configure Ceph options which aren't exposed by the Rook-Ceph operator + +To configure a Ceph option which isn't exposed by the operator, you can provide raw Ceph configuration entries in parameter xref:references/parameters.adoc#_config_override[`ceph_cluster.config_override`]. + +For example, to change the OSD operations queue (`op_queue`) scheduler to the new `mclock_scheduler`, you can provide the following config. + +[IMPORTANT] +==== +This configuration is only an example. +We've not tested or benchmarked Ceph with the `mclock` scheduler. +==== + +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + config_override: + osd: + osd_op_queue: mclock_scheduler +---- + +As discussed in xref:references/parameters.adoc#_config_override[the parameter's documentation], the contents of `ceph_cluster.config_override` are rendered into __ini__ style format by the component. + +Each key in the parameter is used as the name of a section in the resulting _ini_ style configuration file. 
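+
+For example, the `mclock_scheduler` override shown above is rendered into an _ini_ style snippet equivalent to the following.
+
+[source,ini]
+----
+[osd]
+osd_op_queue = mclock_scheduler
+----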
diff --git a/docs/modules/ROOT/pages/how-tos/openshift4.adoc b/docs/modules/ROOT/pages/how-tos/openshift4.adoc new file mode 100644 index 00000000..d4db811f --- /dev/null +++ b/docs/modules/ROOT/pages/how-tos/openshift4.adoc @@ -0,0 +1,28 @@ += Configuring the component for OpenShift 4 + +The component mostly automatically configures itself for OpenShift 4. + +There are some configurations which are of particular interest on OpenShift 4. +This how-to gives configuration snippets to customize OpenShift 4-specific configurations of the component. + + +== Customize machine disruption budget configuration + +By default, the component enables machine disruption budget management on OpenShift 4. +The component assumes that machine disruption budgets should be created in namespace `openshift-machine-api`. + +The default configuration of the `CephCluster` resource for OpenShift 4 is equivalent to applying the following configuration in the hierarchy. + +[source,yaml] +---- +parameters: + rook_ceph: + cephClusterSpec: + disruptionManagement: + manageMachineDisruptionBudgets: true <1> + machineDisruptionBudgetNamespace: openshift-machine-api <2> +---- +<1> Whether the component should create and manage machine disruption budgets. +Change this value to `false` to disable machine disruption budgets on OCP4. +<2> The namespace in which the operator creates the machine disruption budgets. +Change this value if machine disruption budgets should be created in some other namespace. diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index 146b8b78..dfcb8587 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -1,5 +1,11 @@ = Rook Ceph -rook-ceph is a Commodore component to manage Rook Ceph. +Rook Ceph is a Commodore component to manage the https://rook.io[Rook.io] Ceph operator and associated resources. +The component configures a single Ceph cluster and allows users to configure one or more `CephBlockPool` and `CephFilesystem` resources on the Ceph cluster. +For each storage pool (RBD blockpool or CephFS), the component creates a `StorageClass` and `VolumeSnapshotClass`. -See the xref:references/parameters.adoc[parameters] reference for further details. +The component depends on the https://hub.syn.tools/storageclass/index.html[storageclass component] for managing StorageClasses. + +To get started with the component, see the xref:tutorials/quick-start.adoc[quick-start guide]. + +See the xref:references/parameters.adoc[parameters] reference for a documentation of all the parameters the component understands. diff --git a/docs/modules/ROOT/pages/references/parameters.adoc b/docs/modules/ROOT/pages/references/parameters.adoc index ba1e380e..7c9d14d1 100644 --- a/docs/modules/ROOT/pages/references/parameters.adoc +++ b/docs/modules/ROOT/pages/references/parameters.adoc @@ -6,14 +6,352 @@ The parent key for all of the following parameters is `rook_ceph`. [horizontal] type:: string -default:: `syn-rook-ceph` +default:: `syn-rook-ceph-operator` -The namespace in which to deploy this component. +The namespace in which the Rook Ceph operator and the CSI drivers are deployed +== `ceph_cluster` -== Example +[horizontal] +type:: dict + +The configuration of the Ceph cluster to deploy. +See the following sections for individual parameters nested under this key. + +=== `name` + +[horizontal] +type:: string +default:: `cluster` + +The name of the Ceph cluster object. +Also used as part of the storageclass and volumesnapshotclass names. 
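+
+As an illustration, changing the cluster name also changes the generated resource names (the name `mycluster` below is hypothetical): with the default RBD pool `storagepool`, the component would create a storageclass named `rbd-storagepool-mycluster` instead of `rbd-storagepool-cluster`.
+
+[source,yaml]
+----
+parameters:
+  rook_ceph:
+    ceph_cluster:
+      name: mycluster
+----
+
+Keep in mind that the default value of parameter `ceph_cluster.namespace` is derived from this name as well.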
+ +=== `namespace` + +[horizontal] +type:: string +default:: `syn-rook-ceph-${rook_ceph:ceph_cluster:name}` + +The namespace in which the Ceph cluster is deployed. + +By default, the component deploys the Ceph cluster in a different namespace than the operator and the CSI drivers. +However, the component also supports deploying the operator, CSI drivers and Ceph cluster in the same namespace. + +=== `node_count` + +[horizontal] +type:: integer +default:: `3` + +The number of storage nodes (disks really) that the Ceph cluster should expect. + +The operator will deploy this many Ceph OSDs. + +=== `block_storage_class` + +[horizontal] +type:: string +default:: `localblock` + +The storage class to use for the block storage volumes backing the Ceph cluster. +The storage class **must** support `volumeMode=Block`. + + +=== `block_volume_size` + +[horizontal] +type:: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/#Quantity[K8s Quantity] +default:: `1` + +By default, the component expects that pre-provisioned block storage volumes are used. +If you deploy the component on a cluster which dynamically provisions volumes for the storage class configured in `ceph_cluster.block_storage_class`, set this value to the desired size of the disk for a single node of your Ceph cluster. + +=== `tune_fast_device_class` + +[horizontal] +type:: boolean +default:: `false` + +This parameter can be set to `true` to tune the Ceph cluster OSD parameters for SSDs (or better). + +See https://rook.io/docs/rook/v1.6/ceph-cluster-crd.html#storage-class-device-sets[the Rook.io Ceph cluster CRD documentation] for a more detailed explanation. + +=== `config_override` + +[horizontal] +type:: dict +default:: ++ +[source,yaml] +---- +osd: + bluefs_buffered_io: false +---- + +Additional Ceph configurations which are rendered in _ini_ style format by the component. +Each key in the dict is translated into a section in the resulting _ini_ style file containing the key's value -- which is expected to be a dict -- as settings. + +[IMPORTANT] +==== +The component doesn't validate the resulting configuration. +Please be aware that Ceph may fail to start if you provide an invalid configuration file. +See the https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/[Ceph documentation] for a list of valid configuration sections and options. +==== + +The default value is translated into the following _ini_ style file: + +[source,ini] +---- +[osd] +bluefs_buffered_io = false +---- + +The resulting _ini_ style file is written to the ConfigMap `rook-config-override` in the Ceph cluster namespace. + +=== `rbd_enabled` + +[horizontal] +type:: boolean +default:: `true` + +This parameter controls whether the RBD CSI driver, its associated volumesnapshotclass and any configured `CephBlockPool` resources and associated storageclasses are provisioned. + +The `CephBlockPool` resources are defined and configured in parameter <<_storage_pools_rbd,`storage_pools.rbd`>>. + + +=== `cephfs_enabled` + +[horizontal] +type:: boolean +default:: `false` + +This parameter controls whether the CephFS CSI driver, its associated volumesnapshotclass and any configured `CephFilesystem` resources and associated storageclasses are provisioned. + +The `CephFilesystem` resources are defined and configured in parameter <<_storage_pools_cephfs,`storage_pools.cephfs`>>. 
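+
+For example, to provision the CephFS CSI driver together with the default `fspool` filesystem documented below, enable CephFS in the hierarchy:
+
+[source,yaml]
+----
+parameters:
+  rook_ceph:
+    ceph_cluster:
+      cephfs_enabled: true
+----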
+ +=== `storage_pools.rbd` + +[horizontal] +type:: dict +keys:: Names of `CephBlockPool` resources +values:: dicts with keys `config` and `storage_class_config` + +In this parameter `CephBlockPool` resources are configured. +The component creates exactly one storageclass and volumesnapshotclass per block pool. + +By default the parameter holds the following configuration: + +[source,yaml] +---- +storagepool: + config: + failureDomain: host + replicated: + size: 3 + requireSafeReplicaSize: true + storage_class_config: + parameters: + csi.storage.k8s.io/fstype: ext4 + allowVolumeExpansion: true +---- + +This configuration results in + +* A `CephBlockPool` named `storagepool` which is configured with 3 replicas distributed on different hosts +* A storageclass which creates PVs on this block pool, uses the `ext4` filesystem and supports volume expansion +* A volumesnapshotclass associated with the storageclass + +See https://rook.io/docs/rook/v1.6/ceph-pool-crd.html[the Rook.io `CephBlockPool` CRD documentation] for all possible configurations in key `config`. + +The values in key `storage_class_config` are merged into the `StorageClass` resource. + +=== `storage_pools.cephfs` + +[horizontal] +type:: dict +keys:: Names of `CephFilesystem` resources +values:: dicts with keys `data_pools`, `config` and `storage_class_config` + +In this parameter `CephFilesystem` resources are configured. +The component creates exactly one storageclass and volumesnapshotclass per CephFS. + +By default the parameter holds the following configuration: + +[source,yaml] +---- +fspool: + data_pools: + pool0: + failureDomain: host + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + compression_mode: none + config: + metadataPool: + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + compression_mode: none + # dataPools rendered from data_pools in Jsonnet + preserveFilesystemOnDelete: true + metadataServer: + activeCount: 1 + activeStandby: true + # metadata server placement done in Jsonnet but can be + # extended here + mirroring: + enabled: false + storage_class_config: + allowVolumeExpansion: true +---- + +This configuration creates one `CephFilesystem` resource named `fspool`. +This CephFS instance is configured to have 3 replicas both for the metadata pool and its single data pool. + +The key `data_pools` is provided to avoid having to manage a list of data pools directly in the hierarchy. +The values of each key in `data_pools` are placed in the resulting CephFS resource's field `.spec.dataPools` + +The contents of key `config` are used as the base value of the resulting resource's `.spec` field. +Note that data pools given in `config` in the hierarchy will be overwritten by the pools configured in `data_pools`. + +The values in key `storage_class_config` are merged into the `StorageClass` resource which is for the CephFS instance. + +See https://rook.io/docs/rook/v1.6/ceph-filesystem-crd.html[the Rook.io `CephFilesystem` CRD documentation] for all possible configurations in key `config`. + + +== `node_selector` + +[horizontal] +type:: dict +default:: ++ +[source,yaml] +---- +node-role.kubernetes.io/storage: '' +---- + + +The node selector (if applicable) for all the resources managed by the component. + +== `tolerations` + +[horizontal] +type:: dict +default:: ++ +[source,yaml] +---- +- key: storagenode + operator: Exists +---- + +The tolerations (if applicable) for all the resources managed by the component. 
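+
+As an illustration, if your storage nodes carry a taint other than the default shown above, the tolerations can be overridden in the hierarchy (the taint `dedicated=storage:NoSchedule` in this snippet is hypothetical):
+
+[source,yaml]
+----
+parameters:
+  rook_ceph:
+    tolerations:
+      - key: dedicated
+        operator: Equal
+        value: storage
+        effect: NoSchedule
+----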
+ +The component assumes that nodes on which the deployments should be scheduled may be tainted with `storagenode=True:NoSchedule`. + +== `images` +[horizontal] +type:: dict +default:: See https://github.com/projectsyn/component-rook-ceph/blob/master/class/defaults.yml[`class/defaults.yml` on Github] + +This parameter allows selecting the Docker images to use for Rook.io, Ceph, and Ceph-CSI. +Each image is specified using keys `registry`, `image` and `tag`. +This structure allows easily injecting a registry mirror, if required. + +== `charts` + +[horizontal] +type:: dict +default:: See https://github.com/projectsyn/component-rook-ceph/blob/master/class/defaults.yml[`class/defaults.yml` on Github] + +This parameter allows selecting the Helm chart version for the `rook-ceph` operator. + +== `operator_helm_values` + +[horizontal] +type:: dict +default:: See https://github.com/projectsyn/component-rook-ceph/blob/master/class/defaults.yml[`class/defaults.yml` on Github] + +The Helm values to use when rendering the rook-ceph operator Helm chart. + +A few Helm values are configured based on other component parameters by default: + +* The data in parameter `images` is used to set the `image.repository`, `image.tag`, and `csi.cephcsi.image` Helm values +* The value of `node_selector` is used to set Helm value `nodeSelector` +* The value of `tolerations` is used to set Helm value `tolerations` +* The component ensures that `hostpathRequiresPrivileged` is enabled on OpenShift 4 regardless of the contents of the Helm value. + +See https://rook.io/docs/rook/v1.6/helm-operator.html#configuration[the Rook.io docs] for a full list of Helm values. + +== `toolbox` + +[horizontal] +type:: dict +default:: ++ +[source,yaml] +---- +enabled: true +image: ${rook_ceph:images:rook} +---- + +The configuration for the Rook-Ceph toolbox deployment. +This deployment provides an in-cluster shell to observe and administrate the Ceph cluster. + +== `cephClusterSpec` + +[horizontal] +type:: dict +default:: See https://github.com/projectsyn/component-rook-ceph/blob/master/class/defaults.yml[`class/defaults.yml` on Github] + +The default configuration for the `CephCluster` resource. +The value of this parameter is used as field `.spec` of the resulting resource. + +Selected configurations of the Ceph cluster are inherited from other component parameters. +If you overwrite those configurations in this parameter, the values provided in the "source" parameters won't have an effect. + +=== Inherited configurations + +* `cephVersion.image` is constructed from the data in parameter <<_images,`images`>>. +* `placement.all.nodeAffinity` is built from parameter <<_node_selector,`node_selector`>>. +The component constructs the following value for the configuration: ++ +[source,yaml] +---- +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: <1> + - key: NODE_SELECTOR_KEY + operator: Exists + ... +---- +<1> The component creates an entry in `matchExpressions` with `key` equal to the node selector key and `operator=Exists` for each key in parameter <<_node_selector,`node_selector`>>. + +* `placement.all.tolerations` is set to the value of parameter <<_tolerations,`tolerations`>>. +* The component creates as single entry for `storage.storageClassDeviceSets` based on values given in parameter <<_ceph_cluster,`ceph_cluster`>>. +Users are encouraged to use the parameter <<_ceph_cluster,`ceph_cluster`>> to configure the Ceph cluster's backing storage. 
++ +The component expects that the provided storageclass for the backing storage supports `volumeMode=Block`. + +See https://rook.io/docs/rook/v1.6/ceph-cluster-crd.html#settings[the Rook.io `CephCluster` documentation] for a full list of configuration parameters. + +== Example configurations + +=== Configure the component for SElinux-enabled cluster nodes + +The component automatically configures the operator on OpenShift 4. +However, on other Kubernetes distributions on nodes which use SElinux, users need to enable `hostpathRequiresPrivileged` in the operator's helm values. [source,yaml] ---- -namespace: example-namespace +parameters: + rook_ceph: + operator_helm_values: + hostpathRequiresPrivileged: true <1> ---- +<1> The operator needs to be informed that deployments which use `hostPath` volume mounts need to run with `privileged` security context. +This setting is required on any cluster which uses SELinux on the nodes. diff --git a/docs/modules/ROOT/pages/tutorials/.gitkeep b/docs/modules/ROOT/pages/tutorials/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/modules/ROOT/pages/tutorials/quick-start.adoc b/docs/modules/ROOT/pages/tutorials/quick-start.adoc new file mode 100644 index 00000000..8d47e449 --- /dev/null +++ b/docs/modules/ROOT/pages/tutorials/quick-start.adoc @@ -0,0 +1,66 @@ += Quick start + +This tutorial will guide you to a working Ceph setup using the default configuration. + +== Prerequisites + +* Cluster is already managed by Project Syn +* At least 3 adequately sized nodes (minimum 8 vCPU, 16GB RAM) are provisioned for the storage cluster +** The nodes are labeled with `node-role.kubernetes.io/storage=''` and tainted with `storagenode=True:NoSchedule` +* Block storage volumes (`volumeMode=Block`) are available in the cluster +* The block storage volumes are associated with a StorageClass +* A working `commodore` command is available locally. +See the https://syn.tools/commodore/running-commodore.html[Running Commodore] documentation for details. + +== Steps + +. Enable component `rook-ceph` ++ +.c-cluster-id.yml +[source,yaml] +---- +applications: + - rook-ceph +---- + +. Configure backing storageclass for the Ceph cluster ++ +.c-cluster-id.yml +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + block_storage_class: <1> +---- +<1> Specify the name of a storage class which provides volumes with `volumeMode=Block`. + +. If your block storage volumes are provisioned dynamically, specify the volume size ++ +.c-cluster-id.yml +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + block_volume_size: 200Gi <1> +---- +<1> If you are using pre-provisioned block storage volumes, you can leave this parameter at its default value of `1`. + +. If your block storage volumes are backed by SSDs (or better), tune Ceph for SSD backing storage. ++ +.c-cluster-id.yml +[source,yaml] +---- +parameters: + rook_ceph: + ceph_cluster: + tune_fast_device_class: true +---- + +. 
Compile and push the cluster catalog
++
+[source,bash]
+----
+commodore catalog compile --push -i
+----
diff --git a/docs/modules/ROOT/partials/nav.adoc b/docs/modules/ROOT/partials/nav.adoc
index 08f92836..f6c92ade 100644
--- a/docs/modules/ROOT/partials/nav.adoc
+++ b/docs/modules/ROOT/partials/nav.adoc
@@ -1,2 +1,14 @@
 * xref:index.adoc[Home]
+
+.Tutorials
+
+* xref:tutorials/quick-start.adoc[Quick start]
+
+.How-to guides
+
+* xref:how-tos/openshift4.adoc[Configuring the component for OpenShift 4]
+* xref:how-tos/configure-ceph.adoc[Configuring and tuning Ceph]
+
+.Technical reference
+
 * xref:references/parameters.adoc[Parameters]
diff --git a/postprocess/patch_operator_deployment.jsonnet b/postprocess/patch_operator_deployment.jsonnet
new file mode 100644
index 00000000..4206443c
--- /dev/null
+++ b/postprocess/patch_operator_deployment.jsonnet
@@ -0,0 +1,79 @@
+local com = import 'lib/commodore.libjsonnet';
+local inv = com.inventory();
+local params = inv.parameters.rook_ceph;
+
+// Automatically set ROOK_HOSTPATH_REQUIRES_PRIVILEGED=true on OCP4
+// TODO: this should be set to true whenever SELinux is enabled on the
+// cluster hosts.
+// Respect user-provided configuration via `operator_helm_values` on
+// distributions other than OCP4.
+local hostpath_requires_privileged =
+  if inv.parameters.facts.distribution == 'openshift4' then
+    true
+  else
+    com.getValueOrDefault(
+      params.operator_helm_values,
+      'hostpathRequiresPrivileged',
+      false,
+    );
+
+local deployment_file = std.extVar('output_path') + '/deployment.yaml';
+
+local deployment = com.yaml_load(deployment_file) + {
+  spec+: {
+    template+: {
+      spec+: {
+        containers: [
+          if c.name == 'rook-ceph-operator' then
+            c {
+              env: [
+                if e.name == 'ROOK_CSI_ENABLE_RBD' then
+                  e {
+                    value: '%s' % params.ceph_cluster.rbd_enabled,
+                  }
+                else if e.name == 'ROOK_CSI_ENABLE_CEPHFS' then
+                  e {
+                    value: '%s' % params.ceph_cluster.cephfs_enabled,
+                  }
+                else if e.name == 'ROOK_HOSTPATH_REQUIRES_PRIVILEGED' then
+                  e {
+                    value: '%s' % hostpath_requires_privileged,
+                  }
+                else
+                  e
+                for e in super.env
+              ],
+              volumeMounts+: [
+                {
+                  mountPath: '/var/lib/rook',
+                  name: 'rook-config',
+                },
+                {
+                  mountPath: '/etc/ceph',
+                  name: 'default-config-dir',
+                },
+              ],
+            }
+          else
+            c
+          for c in super.containers
+        ],
+        volumes+: [
+          {
+            name: 'rook-config',
+            emptyDir: {},
+          },
+          {
+            name: 'default-config-dir',
+            emptyDir: {},
+          },
+        ],
+      },
+    },
+  },
+};
+
+
+{
+  deployment: deployment,
+}