diff --git a/CHANGELOG.md b/CHANGELOG.md index 808246d..a2d830a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file. ## [Unreleased] +### Added + +- The operator can now run on Kubernetes clusters using a non-default cluster domain. It should automatically detect the + correct domain to use, but you can also use the env var `KUBERNETES_CLUSTER_DOMAIN` to set the domain explicitly + or use the helm-chart property `kubernetesClusterDomain` ([#xxx]). + ### Changed - Reduce CRD size from `484KB` to `57KB` by accepting arbitrary YAML input instead of the underlying schema for the following fields ([#118]): @@ -11,6 +17,7 @@ All notable changes to this project will be documented in this file. - `affinity` [#118]: https://github.com/stackabletech/edc-operator/pull/118 +[#xxx]: https://github.com/stackabletech/edc-operator/pull/xxx ## [24.7.0] - 2024-07-24 diff --git a/Cargo.lock b/Cargo.lock index 547fd2d..6ce3f39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -460,9 +460,9 @@ dependencies = [ [[package]] name = "delegate" -version = "0.12.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e018fccbeeb50ff26562ece792ed06659b9c2dae79ece77c4456bb10d9bf79b" +checksum = "bc2323e10c92e1cf4d86e11538512e6dc03ceb586842970b6332af3d4046a046" dependencies = [ "proc-macro2", "quote", @@ -1095,9 +1095,9 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19501afb943ae5806548bc3ebd7f3374153ca057a38f480ef30adfde5ef09755" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" dependencies = [ "base64 0.22.1", "chrono", @@ -1109,9 +1109,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.93.1" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0365920075af1a2d23619c1ca801c492f2400157de42627f041a061716e76416" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" dependencies = [ "k8s-openapi", "kube-client", @@ -1122,9 +1122,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.93.1" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81336eb3a5b10a40c97a5a97ad66622e92bad942ce05ee789edd730aa4f8603" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" dependencies = [ "base64 0.22.1", "bytes", @@ -1160,9 +1160,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "0.93.1" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce373a74d787d439063cdefab0f3672860bd7bac01a38e39019177e764a0fe6" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" dependencies = [ "chrono", "form_urlencoded", @@ -1171,15 +1171,16 @@ dependencies = [ "k8s-openapi", "schemars", "serde", + "serde-value", "serde_json", "thiserror", ] [[package]] name = "kube-derive" -version = "0.93.1" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04a26c9844791e127329be5dce9298b03f9e2ff5939076d5438c92dea5eb78f2" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" dependencies = [ "darling", "proc-macro2", @@ -1190,9 +1191,9 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.93.1" +version = "0.95.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b84733c0fed6085c9210b43ffb96248676c1e800d0ba38d15043275a792ffa4" +checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" dependencies = [ "ahash", "async-broadcast", @@ -2096,8 +2097,8 @@ dependencies = [ [[package]] name = "stackable-operator" -version = "0.74.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.74.0#c77a5423b66bc1667b63af7d8bec00de88a5303f" +version = "0.79.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.79.0#a2ac5f525edfd9a2fab884ba753e79a325fbb752" dependencies = [ "chrono", "clap", @@ -2107,6 +2108,7 @@ dependencies = [ "dockerfile-parser", "either", "futures 0.3.31", + "indexmap", "json-patch", "k8s-openapi", "kube", @@ -2121,6 +2123,7 @@ dependencies = [ "serde_yaml", "snafu 0.8.5", "stackable-operator-derive", + "stackable-shared", "strum", "tokio", "tracing", @@ -2133,7 +2136,7 @@ dependencies = [ [[package]] name = "stackable-operator-derive" version = "0.3.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.74.0#c77a5423b66bc1667b63af7d8bec00de88a5303f" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.79.0#a2ac5f525edfd9a2fab884ba753e79a325fbb752" dependencies = [ "darling", "proc-macro2", @@ -2141,6 +2144,18 @@ dependencies = [ "syn 2.0.79", ] +[[package]] +name = "stackable-shared" +version = "0.0.1" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.79.0#a2ac5f525edfd9a2fab884ba753e79a325fbb752" +dependencies = [ + "kube", + "semver", + "serde", + "serde_yaml", + "snafu 0.8.5", +] + [[package]] name = "strsim" version = "0.11.1" diff --git a/Cargo.toml b/Cargo.toml index cc489d4..51c9113 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" snafu = "0.8" -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.74.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.79.0" } strum = { version = "0.26", features = ["derive"] } tokio = { version = "1.39", features = ["full"] } tracing = "0.1" diff --git a/deploy/helm/edc-operator/crds/crds.yaml b/deploy/helm/edc-operator/crds/crds.yaml index ea61991..7cb183c 100644 --- a/deploy/helm/edc-operator/crds/crds.yaml +++ b/deploy/helm/edc-operator/crds/crds.yaml @@ -32,7 +32,6 @@ spec: ionos: properties: s3: - description: An S3 bucket definition, it can either be a reference to an explicit S3Bucket object, or it can be an inline definition of a bucket. Read the [S3 resources concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3) to learn more. oneOf: - required: - inline @@ -40,118 +39,103 @@ spec: - reference properties: inline: - description: An inline definition, containing the S3 bucket properties. + description: S3 connection definition as a resource. Learn more on the [S3 concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3). properties: - bucketName: - description: The name of the S3 bucket. + accessStyle: + default: VirtualHosted + description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). 
+ enum: + - Path + - VirtualHosted + type: string + credentials: + description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) providing `accessKey` and `secretKey` is sufficient. nullable: true + properties: + scope: + description: '[Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass).' + nullable: true + properties: + listenerVolumes: + default: [] + description: The listener volume scope allows Node and Service scopes to be inferred from the applicable listeners. This must correspond to Volume names in the Pod that mount Listeners. + items: + type: string + type: array + node: + default: false + description: The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. This will typically be the DNS name of the node. + type: boolean + pod: + default: false + description: The pod scope is resolved to the name of the Kubernetes Pod. This allows the secret to differentiate between StatefulSet replicas. + type: boolean + services: + default: [] + description: The service scope allows Pod objects to specify custom scopes. This should typically correspond to Service objects that the Pod participates in. + items: + type: string + type: array + type: object + secretClass: + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' + type: string + required: + - secretClass + type: object + host: + description: 'Host of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' type: string - connection: - description: The definition of an S3 connection, either inline or as a reference. + port: + description: Port the S3 server listens on. If not specified the product will determine the port to use. + format: uint16 + minimum: 0.0 + nullable: true + type: integer + tls: + description: Use a TLS connection. If not specified no TLS will be used. nullable: true - oneOf: - - required: - - inline - - required: - - reference properties: - inline: - description: Inline definition of an S3 connection. + verification: + description: The verification method used to verify the certificates of the server and/or the client. + oneOf: + - required: + - none + - required: + - server properties: - accessStyle: - description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). - enum: - - Path - - VirtualHosted - nullable: true - type: string - credentials: - description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) providing `accessKey` and `secretKey` is sufficient. - nullable: true - properties: - scope: - description: '[Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass).' - nullable: true - properties: - node: - default: false - description: The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. This will typically be the DNS name of the node. 
- type: boolean - pod: - default: false - description: The pod scope is resolved to the name of the Kubernetes Pod. This allows the secret to differentiate between StatefulSet replicas. - type: boolean - services: - default: [] - description: The service scope allows Pod objects to specify custom scopes. This should typically correspond to Service objects that the Pod participates in. - items: - type: string - type: array - type: object - secretClass: - description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' - type: string - required: - - secretClass + none: + description: Use TLS but don't verify certificates. type: object - host: - description: 'Hostname of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' - nullable: true - type: string - port: - description: Port the S3 server listens on. If not specified the product will determine the port to use. - format: uint16 - minimum: 0.0 - nullable: true - type: integer - tls: - description: If you want to use TLS when talking to S3 you can enable TLS encrypted communication with this setting. - nullable: true + server: + description: Use TLS and a CA certificate to verify the server. properties: - verification: - description: The verification method used to verify the certificates of the server and/or the client. + caCert: + description: CA cert to verify the server. oneOf: - required: - - none + - webPki - required: - - server + - secretClass properties: - none: - description: Use TLS but don't verify certificates. - type: object - server: - description: Use TLS and a CA certificate to verify the server. - properties: - caCert: - description: CA cert to verify the server. - oneOf: - - required: - - webPki - - required: - - secretClass - properties: - secretClass: - description: Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. Note that a SecretClass does not need to have a key but can also work with just a CA certificate, so if you got provided with a CA cert but don't have access to the key you can still use this method. - type: string - webPki: - description: Use TLS and the CA certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. - type: object - type: object - required: - - caCert + secretClass: + description: Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. Note that a SecretClass does not need to have a key but can also work with just a CA certificate, so if you got provided with a CA cert but don't have access to the key you can still use this method. + type: string + webPki: + description: Use TLS and the CA certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. type: object type: object required: - - verification + - caCert type: object type: object - reference: - description: A reference to an S3Connection resource. - type: string + required: + - verification type: object + required: + - host type: object reference: - description: A reference to an S3 bucket object. This is simply the name of the `S3Bucket` resource. 
type: string type: object tokenSecret: @@ -239,10 +223,6 @@ spec: nullable: true type: object x-kubernetes-preserve-unknown-fields: true - required: - - nodeAffinity - - podAffinity - - podAntiAffinity type: object logging: default: @@ -502,10 +482,6 @@ spec: nullable: true type: object x-kubernetes-preserve-unknown-fields: true - required: - - nodeAffinity - - podAffinity - - podAntiAffinity type: object logging: default: @@ -734,8 +710,10 @@ spec: description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: 'Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + required: + - name type: object nullable: true type: array diff --git a/deploy/helm/edc-operator/values.yaml b/deploy/helm/edc-operator/values.yaml index 778f44e..07c7b6b 100644 --- a/deploy/helm/edc-operator/values.yaml +++ b/deploy/helm/edc-operator/values.yaml @@ -47,3 +47,8 @@ nodeSelector: {} tolerations: [] affinity: {} + +# When running on a non-default Kubernetes cluster domain and the auto detection is not working correctly, +# you can set your custom cluster domain here. +# See the https://docs.stackable.tech/home/stable/guides/kubernetes-cluster-domain guide for details +# kubernetesClusterDomain: my-cluster.local diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs index 97a9b82..706a52b 100644 --- a/rust/operator-binary/src/controller.rs +++ b/rust/operator-binary/src/controller.rs @@ -18,8 +18,10 @@ use product_config::{ ProductConfigManager, }; use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_operator::commons::tls_verification::{CaCert, TlsVerification}; use stackable_operator::{ builder::{ + self, configmap::ConfigMapBuilder, meta::ObjectMetaBuilder, pod::{ @@ -36,11 +38,8 @@ use stackable_operator::{ client::GetApi, cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, commons::{ - authentication::tls::{CaCert, TlsVerification}, - product_image_selection::ResolvedProductImage, - rbac::build_rbac_resources, - s3::S3ConnectionSpec, - secret_class::SecretClassVolumeError, + product_image_selection::ResolvedProductImage, rbac::build_rbac_resources, + s3::S3ConnectionSpec, secret_class::SecretClassVolumeError, }, k8s_openapi::{ api::core::v1::SecretVolumeSource, @@ -60,6 +59,7 @@ use stackable_operator::{ product_config_utils::{transform_all_roles_to_config, validate_all_roles_and_groups_config}, product_logging::{ self, + framework::LoggingError, spec::{ ConfigMapLogConfig, ContainerLogConfig, ContainerLogConfigChoice, CustomContainerLogConfig, @@ -158,7 +158,7 @@ pub enum Error { }, #[snafu(display("failed to resolve S3 connection"))] ResolveS3Connection { - source: stackable_operator::commons::s3::Error, + source: stackable_operator::commons::s3::S3Error, }, #[snafu(display("failed to resolve and merge resource config for role and role group"))] FailedToResolveResourceConfig { source: crate::crd::Error }, @@ -228,6 +228,17 @@ pub enum Error { source: SecretClassVolumeError, volume_name: String, }, + + #[snafu(display("failed to configure logging"))] + ConfigureLogging { source: 
LoggingError }, + + #[snafu(display("failed to add needed volume"))] + AddVolume { source: builder::pod::Error }, + + #[snafu(display("failed to add needed volumeMount"))] + AddVolumeMount { + source: builder::pod::container::Error, + }, } type Result = std::result::Result; @@ -250,6 +261,7 @@ pub async fn reconcile_edc(edc: Arc, ctx: Arc) -> Result, ctx: Arc) -> Result, ctx: Arc) -> Result, role_group_config: &HashMap>, merged_config: &ConnectorConfig, - s3_conn: Option<&S3ConnectionSpec>, + s3_conn: &S3ConnectionSpec, vector_aggregator_address: Option<&str>, ) -> Result { let mut config_properties = String::new(); @@ -456,10 +468,8 @@ fn build_connector_rolegroup_config_map( let mut conf: BTreeMap> = Default::default(); match property_name_kind { PropertyNameKind::File(file_name) if file_name == CONFIG_PROPERTIES => { - if let Some(conn) = s3_conn { - if let Some(endpoint) = conn.endpoint() { - conf.insert(EDC_IONOS_ENDPOINT.to_string(), Some(endpoint)); - } + if let Ok(endpoint) = s3_conn.endpoint() { + conf.insert(EDC_IONOS_ENDPOINT.to_string(), Some(endpoint.to_string())); } let transformed_config: BTreeMap> = config @@ -582,7 +592,7 @@ fn build_server_rolegroup_statefulset( rolegroup_ref: &RoleGroupRef, metastore_config: &HashMap>, merged_config: &ConnectorConfig, - s3_conn: Option<&S3ConnectionSpec>, + s3_conn: &S3ConnectionSpec, sa_name: &str, ) -> Result { let rolegroup = edc @@ -640,13 +650,11 @@ fn build_server_rolegroup_statefulset( )); // Add S3 secret and access keys from the files mounted by the secret Operator - if let Some(c) = s3_conn { - if c.credentials.is_some() { - let path = format!("{}/{}", STACKABLE_SECRETS_DIR, SECRET_KEY_S3_ACCESS_KEY); - java_cmd.push(format!("-D{}=$(cat {})", EDC_IONOS_ACCESS_KEY, path)); - let path = format!("{}/{}", STACKABLE_SECRETS_DIR, SECRET_KEY_S3_SECRET_KEY); - java_cmd.push(format!("-D{}=$(cat {})", EDC_IONOS_SECRET_KEY, path)); - } + if s3_conn.credentials.is_some() { + let path = format!("{}/{}", STACKABLE_SECRETS_DIR, SECRET_KEY_S3_ACCESS_KEY); + java_cmd.push(format!("-D{}=$(cat {})", EDC_IONOS_ACCESS_KEY, path)); + let path = format!("{}/{}", STACKABLE_SECRETS_DIR, SECRET_KEY_S3_SECRET_KEY); + java_cmd.push(format!("-D{}=$(cat {})", EDC_IONOS_SECRET_KEY, path)); } // JVM security properties configured via configOverrides @@ -663,12 +671,16 @@ fn build_server_rolegroup_statefulset( .args(vec![format!("{}", java_cmd.join(" "))]) .image_from_product_image(resolved_product_image) .add_volume_mount(STACKABLE_CONFIG_DIR_NAME, STACKABLE_CONFIG_DIR) + .context(AddVolumeMountSnafu)? .add_volume_mount(STACKABLE_CERT_MOUNT_DIR_NAME, STACKABLE_CERT_MOUNT_DIR) + .context(AddVolumeMountSnafu)? .add_volume_mount(STACKABLE_LOG_DIR_NAME, STACKABLE_LOG_DIR) + .context(AddVolumeMountSnafu)? .add_volume_mount( STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME, STACKABLE_LOG_CONFIG_MOUNT_DIR, ) + .context(AddVolumeMountSnafu)? .add_container_port(HTTP_PORT_NAME, HTTP_PORT.into()) .add_container_port(CONTROL_PORT_NAME, CONTROL_PORT.into()) .add_container_port(MANAGEMENT_PORT_NAME, MANAGEMENT_PORT.into()) @@ -718,11 +730,12 @@ fn build_server_rolegroup_statefulset( .add_volume(stackable_operator::k8s_openapi::api::core::v1::Volume { name: STACKABLE_CONFIG_DIR_NAME.to_string(), config_map: Some(ConfigMapVolumeSource { - name: Some(rolegroup_ref.object_name()), + name: rolegroup_ref.object_name(), ..Default::default() }), ..Default::default() }) + .context(AddVolumeSnafu)? 
.add_volume(Volume { name: STACKABLE_LOG_DIR_NAME.to_string(), empty_dir: Some(EmptyDirVolumeSource { @@ -733,6 +746,7 @@ fn build_server_rolegroup_statefulset( }), ..Volume::default() }) + .context(AddVolumeSnafu)? .add_volume(Volume { name: STACKABLE_CERT_MOUNT_DIR_NAME.to_string(), secret: Some(SecretVolumeSource { @@ -741,6 +755,7 @@ fn build_server_rolegroup_statefulset( }), ..Default::default() }) + .context(AddVolumeSnafu)? .affinity(&merged_config.affinity) .service_account_name(sa_name) .security_context( @@ -758,38 +773,45 @@ fn build_server_rolegroup_statefulset( })), }) = merged_config.logging.containers.get(&Container::Connector) { - pod_builder.add_volume(Volume { - name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), - config_map: Some(ConfigMapVolumeSource { - name: Some(config_map.into()), - ..ConfigMapVolumeSource::default() - }), - ..Volume::default() - }); + pod_builder + .add_volume(Volume { + name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: config_map.into(), + ..ConfigMapVolumeSource::default() + }), + ..Volume::default() + }) + .context(AddVolumeSnafu)?; } else { - pod_builder.add_volume(Volume { - name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), - config_map: Some(ConfigMapVolumeSource { - name: Some(rolegroup_ref.object_name()), - ..ConfigMapVolumeSource::default() - }), - ..Volume::default() - }); + pod_builder + .add_volume(Volume { + name: STACKABLE_LOG_CONFIG_MOUNT_DIR_NAME.to_string(), + config_map: Some(ConfigMapVolumeSource { + name: rolegroup_ref.object_name(), + ..ConfigMapVolumeSource::default() + }), + ..Volume::default() + }) + .context(AddVolumeSnafu)?; } if merged_config.logging.enable_vector_agent { - pod_builder.add_container(product_logging::framework::vector_container( - resolved_product_image, - STACKABLE_CONFIG_DIR_NAME, - STACKABLE_LOG_DIR_NAME, - merged_config.logging.containers.get(&Container::Vector), - ResourceRequirementsBuilder::new() - .with_cpu_request("250m") - .with_cpu_limit("500m") - .with_memory_request("128Mi") - .with_memory_limit("128Mi") - .build(), - )); + pod_builder.add_container( + product_logging::framework::vector_container( + resolved_product_image, + STACKABLE_CONFIG_DIR_NAME, + STACKABLE_LOG_DIR_NAME, + merged_config.logging.containers.get(&Container::Vector), + ResourceRequirementsBuilder::new() + .with_cpu_request("250m") + .with_cpu_limit("500m") + .with_memory_request("128Mi") + .with_memory_limit("128Mi") + .build(), + ) + .context(ConfigureLoggingSnafu)?, + ); } let metadata = ObjectMetaBuilder::new() @@ -839,48 +861,51 @@ fn build_server_rolegroup_statefulset( } fn add_s3_volume_and_volume_mounts( - s3_conn: Option<&S3ConnectionSpec>, + s3_conn: &S3ConnectionSpec, cb_druid: &mut ContainerBuilder, pb: &mut PodBuilder, ) -> Result<()> { - if let Some(s3_conn) = s3_conn { - if let Some(credentials) = &s3_conn.credentials { - const VOLUME_NAME: &str = "s3-credentials"; - pb.add_volume(credentials.to_volume(VOLUME_NAME).context( - CredentialsToVolumeSnafu { + if let Some(credentials) = &s3_conn.credentials { + const VOLUME_NAME: &str = "s3-credentials"; + pb.add_volume( + credentials + .to_volume(VOLUME_NAME) + .context(CredentialsToVolumeSnafu { volume_name: VOLUME_NAME, - }, - )?); - cb_druid.add_volume_mount(VOLUME_NAME, STACKABLE_SECRETS_DIR); - } + })?, + ) + .context(AddVolumeSnafu)?; + cb_druid + .add_volume_mount(VOLUME_NAME, STACKABLE_SECRETS_DIR) + .context(AddVolumeMountSnafu)?; + } - if let Some(tls) = &s3_conn.tls { - match 
&tls.verification { - TlsVerification::None {} => return S3TlsNoVerificationNotSupportedSnafu.fail(), - TlsVerification::Server(server_verification) => { - match &server_verification.ca_cert { - CaCert::WebPki {} => {} - CaCert::SecretClass(secret_class) => { - let volume_name = format!("{secret_class}-tls-certificate"); - - let volume = VolumeBuilder::new(&volume_name) - .ephemeral( - SecretOperatorVolumeSourceBuilder::new(secret_class) - .build() - .context(BuildTlsVolumeSnafu { - volume_name: &volume_name, - })?, - ) - .build(); - pb.add_volume(volume); - cb_druid.add_volume_mount( - &volume_name, - format!("{STACKABLE_CERTS_DIR}/{volume_name}"), - ); - } - } + if let Some(tls) = &s3_conn.tls.tls { + match &tls.verification { + TlsVerification::None {} => return S3TlsNoVerificationNotSupportedSnafu.fail(), + TlsVerification::Server(server_verification) => match &server_verification.ca_cert { + CaCert::WebPki {} => {} + CaCert::SecretClass(secret_class) => { + let volume_name = format!("{secret_class}-tls-certificate"); + + let volume = VolumeBuilder::new(&volume_name) + .ephemeral( + SecretOperatorVolumeSourceBuilder::new(secret_class) + .build() + .context(BuildTlsVolumeSnafu { + volume_name: &volume_name, + })?, + ) + .build(); + pb.add_volume(volume).context(AddVolumeSnafu)?; + cb_druid + .add_volume_mount( + &volume_name, + format!("{STACKABLE_CERTS_DIR}/{volume_name}"), + ) + .context(AddVolumeMountSnafu)?; } - } + }, } } diff --git a/rust/operator-binary/src/crd.rs b/rust/operator-binary/src/crd.rs index f3a0e1f..ac41ae4 100644 --- a/rust/operator-binary/src/crd.rs +++ b/rust/operator-binary/src/crd.rs @@ -15,7 +15,10 @@ use stackable_operator::{ }, s3, }, - config::{fragment, fragment::Fragment, fragment::ValidationError, merge::Merge}, + config::{ + fragment::{self, Fragment, ValidationError}, + merge::Merge, + }, k8s_openapi::apimachinery::pkg::api::resource::Quantity, kube::{runtime::reflector::ObjectRef, CustomResource, ResourceExt}, product_config_utils::{self, Configuration}, @@ -23,6 +26,7 @@ use stackable_operator::{ role_utils::{Role, RoleGroupRef}, schemars::{self, JsonSchema}, status::condition::{ClusterCondition, HasStatusCondition}, + utils::cluster_domain::KUBERNETES_CLUSTER_DOMAIN, }; use strum::{Display, EnumIter}; @@ -161,7 +165,7 @@ pub struct EDCClusterConfig { #[serde(rename_all = "camelCase")] pub struct Ionos { pub token_secret: String, - pub s3: s3::S3BucketDef, + pub s3: s3::S3ConnectionInlineOrReference, } // TODO: the secret should be mounted as an env var, and then in the secret should be a EDC_IONOS_TOKEN var with the value. 
// The jar should be able to pick it up @@ -545,9 +549,12 @@ pub struct PodRef { impl PodRef { pub fn fqdn(&self) -> String { + let cluster_domain = KUBERNETES_CLUSTER_DOMAIN + .get() + .expect("KUBERNETES_CLUSTER_DOMAIN must first be set by calling initialize_operator"); format!( - "{}.{}.{}.svc.cluster.local", - self.pod_name, self.role_group_service_name, self.namespace + "{}.{}.{}.svc.{}", + self.pod_name, self.role_group_service_name, self.namespace, cluster_domain ) } } diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 3dbbaac..d860421 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -63,7 +63,8 @@ async fn main() -> anyhow::Result<()> { ])?; let client = - stackable_operator::client::create_client(Some(OPERATOR_NAME.to_string())).await?; + stackable_operator::client::initialize_operator(Some(OPERATOR_NAME.to_string())) + .await?; Controller::new( watch_namespace.get_api::(&client),
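
The changelog entry and the new `values.yaml` comment describe the behaviour this diff enables: the operator should detect the cluster domain automatically, with `KUBERNETES_CLUSTER_DOMAIN` (or the Helm property `kubernetesClusterDomain`, which the chart exposes as that env var) as an explicit override. As a rough illustration only, here is a minimal, self-contained Rust sketch of how such a resolution could work: env var first, then the `search` entries of `/etc/resolv.conf`, then the `cluster.local` default. The function names and the resolv.conf parsing are assumptions for illustration, not the exact logic inside `stackable-operator` 0.79.0.

```rust
use std::{env, fs};

const DEFAULT_CLUSTER_DOMAIN: &str = "cluster.local";

/// Resolve the Kubernetes cluster domain (illustrative sketch):
/// 1. an explicit KUBERNETES_CLUSTER_DOMAIN env var wins,
/// 2. otherwise try to infer it from the `search` line in /etc/resolv.conf,
/// 3. otherwise fall back to the default `cluster.local`.
fn resolve_cluster_domain() -> String {
    if let Ok(domain) = env::var("KUBERNETES_CLUSTER_DOMAIN") {
        if !domain.is_empty() {
            return domain;
        }
    }

    fs::read_to_string("/etc/resolv.conf")
        .ok()
        .and_then(|conf| infer_domain_from_resolv_conf(&conf))
        .unwrap_or_else(|| DEFAULT_CLUSTER_DOMAIN.to_string())
}

/// In-cluster resolv.conf search lists typically look like
/// `<namespace>.svc.<domain> svc.<domain> <domain>`; take the suffix after `svc.`.
fn infer_domain_from_resolv_conf(conf: &str) -> Option<String> {
    conf.lines()
        .filter_map(|line| line.trim().strip_prefix("search "))
        .flat_map(|entries| entries.split_whitespace())
        .find_map(|entry| entry.strip_prefix("svc.").map(str::to_string))
}

fn main() {
    println!("using cluster domain: {}", resolve_cluster_domain());
}
```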
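The `PodRef::fqdn` hunk in `crd.rs` is where the resolved domain is actually consumed: the hard-coded `.svc.cluster.local` suffix becomes `.svc.{cluster_domain}`, read from a static that `initialize_operator` populates at startup. The sketch below mirrors that change under stated assumptions: the `OnceLock` is a stand-in for `stackable_operator::utils::cluster_domain::KUBERNETES_CLUSTER_DOMAIN`, and the struct fields and format string are taken from the diff; the values in `main` are made up for the example.

```rust
use std::sync::OnceLock;

/// Stand-in for the static that `initialize_operator` sets in the real operator.
static KUBERNETES_CLUSTER_DOMAIN: OnceLock<String> = OnceLock::new();

struct PodRef {
    namespace: String,
    role_group_service_name: String,
    pod_name: String,
}

impl PodRef {
    /// Build the pod FQDN with the configurable cluster domain instead of the
    /// previously hard-coded `cluster.local` suffix.
    fn fqdn(&self) -> String {
        let cluster_domain = KUBERNETES_CLUSTER_DOMAIN
            .get()
            .expect("KUBERNETES_CLUSTER_DOMAIN must first be set by calling initialize_operator");
        format!(
            "{}.{}.{}.svc.{}",
            self.pod_name, self.role_group_service_name, self.namespace, cluster_domain
        )
    }
}

fn main() {
    // In the operator this happens once during startup; here we set it directly.
    KUBERNETES_CLUSTER_DOMAIN
        .set("my-cluster.local".to_string())
        .expect("cluster domain already set");

    let pod = PodRef {
        namespace: "default".to_string(),
        role_group_service_name: "edc-connector-default".to_string(),
        pod_name: "edc-connector-default-0".to_string(),
    };

    // Prints: edc-connector-default-0.edc-connector-default.default.svc.my-cluster.local
    println!("{}", pod.fqdn());
}
```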