From 991cca8c58aa31e0942c0ad5eacdbc0e403a7e48 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Tue, 1 Aug 2023 18:27:12 +0800 Subject: [PATCH 01/11] feat: add metrics exporter to prometheus Signed-off-by: iGxnon --- Cargo.toml | 2 +- operator-k8s/Cargo.toml | 12 +++--- operator-k8s/src/controller/mod.rs | 5 +++ operator-k8s/src/lib.rs | 2 + operator-k8s/src/metrics.rs | 67 ++++++++++++++++++++++++++++++ operator-k8s/src/operator.rs | 21 ++++++---- 6 files changed, 95 insertions(+), 14 deletions(-) create mode 100644 operator-k8s/src/metrics.rs diff --git a/Cargo.toml b/Cargo.toml index 64463e13..f5dc833f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace] members = [ + "operator-api", "operator-k8s", "sidecar", "utils", - "operator-api", ] diff --git a/operator-k8s/Cargo.toml b/operator-k8s/Cargo.toml index 5f8d3823..af798cd0 100644 --- a/operator-k8s/Cargo.toml +++ b/operator-k8s/Cargo.toml @@ -14,10 +14,17 @@ keywords = ["kubernetes", "xline", "operator"] [dependencies] anyhow = "1.0.71" async-trait = "0.1.68" +axum = "0.6.18" clap = { version = "4.3.4", features = ["derive"] } +clippy-utilities = "0.2.0" +event-listener = "2.5.3" +flume = "0.10.14" futures = "0.3.28" k8s-openapi = { version = "0.18.0", features = ["v1_26", "schemars"] } kube = { version = "0.83.0", features = ["runtime", "derive"] } +lazy_static = "1.4.0" +operator-api = { path = "../operator-api" } +prometheus = "0.13.3" schemars = "0.8.6" serde = { version = "1.0.130", features = ["derive"] } serde_json = "1.0.97" @@ -31,11 +38,6 @@ tokio = { version = "1.0", features = [ tracing = "0.1.37" tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } utils = { path = "../utils" } -axum = "0.6.18" -operator-api = { path = "../operator-api" } -flume = "0.10.14" -clippy-utilities = "0.2.0" -event-listener = "2.5.3" [dev-dependencies] garde = { version = "0.11.2", default-features = false, features = ["derive", "pattern"] } diff --git a/operator-k8s/src/controller/mod.rs b/operator-k8s/src/controller/mod.rs index 50766167..11dfa1d9 100644 --- a/operator-k8s/src/controller/mod.rs +++ b/operator-k8s/src/controller/mod.rs @@ -11,6 +11,7 @@ use kube::{Api, Resource}; use serde::de::DeserializeOwned; use crate::controller::consts::DEFAULT_REQUEUE_DURATION; +use crate::metrics::{RECONCILE_DURATION, RECONCILE_FAILED_COUNT}; /// Cluster controller pub(crate) mod cluster; @@ -48,6 +49,7 @@ where /// The reconcile function used in kube::runtime::Controller async fn reconcile(resource: Arc, ctx: Arc>) -> Result { + let _timer = RECONCILE_DURATION.start_timer(); let controller = &ctx.controller; controller.reconcile_once(&resource).await?; Ok(Action::requeue(DEFAULT_REQUEUE_DURATION)) @@ -55,6 +57,9 @@ where /// The on_error function used in kube::runtime::Controller fn on_error(resource: Arc, err: &Self::Error, ctx: Arc>) -> Action { + RECONCILE_FAILED_COUNT + .with_label_values(&[&err.to_string()]) + .inc(); let controller = &ctx.controller; controller.handle_error(&resource, err); Action::requeue(DEFAULT_REQUEUE_DURATION) diff --git a/operator-k8s/src/lib.rs b/operator-k8s/src/lib.rs index 8b2dd208..a6b8b486 100644 --- a/operator-k8s/src/lib.rs +++ b/operator-k8s/src/lib.rs @@ -155,6 +155,8 @@ pub mod config; mod controller; /// Custom resource definition mod crd; +/// Xline operator metrics +mod metrics; /// Xline operator pub mod operator; /// Maintain the state of sidecar operators diff --git a/operator-k8s/src/metrics.rs b/operator-k8s/src/metrics.rs new file mode 100644 index 00000000..c315b042 --- 
/dev/null +++ b/operator-k8s/src/metrics.rs @@ -0,0 +1,67 @@ +#![allow(clippy::expect_used)] // it is safe to unwrap static metrics + +use clippy_utilities::NumericCast; +use lazy_static::lazy_static; +use prometheus::{Encoder, Histogram, HistogramOpts, IntCounterVec, Opts, Registry}; +use std::iter::repeat; +use std::ops::Mul; +use tracing::error; + +/// Returns a vector of time buckets for the reconcile duration histogram. +fn exponential_time_bucket(start: f64, factor: f64, count: usize) -> Vec { + repeat(factor) + .enumerate() + .take(count) + .map(|(i, f)| start.mul(f.powi(i.numeric_cast()))) + .collect::>() +} + +lazy_static! { + pub(crate) static ref REGISTRY: Registry = Registry::new(); + pub(crate) static ref RECONCILE_DURATION: Histogram = Histogram::with_opts( + HistogramOpts::new( + "operator_reconcile_duration_seconds", + "Duration of operator reconcile loop in seconds", + ) + .buckets(exponential_time_bucket(0.1, 2.0, 10)) + ) + .expect("failed to create operator_reconcile_duration_seconds histogram"); + pub(crate) static ref RECONCILE_FAILED_COUNT: IntCounterVec = IntCounterVec::new( + Opts::new( + "operator_reconcile_failed_count", + "Number of failed times the operator reconcile loop has run" + ), + &["reason"] + ) + .expect("failed to create operator_reconcile_failed_count counter"); +} + +/// init metrics +pub(crate) fn init() { + REGISTRY + .register(Box::new(RECONCILE_DURATION.clone())) + .expect("failed to register operator_reconcile_duration_seconds histogram"); + REGISTRY + .register(Box::new(RECONCILE_FAILED_COUNT.clone())) + .expect("failed to register operator_reconcile_failed_count counter"); +} + +/// metrics handler +#[allow(clippy::unused_async)] // require by axum +pub(crate) async fn metrics() -> String { + let mut buf1 = Vec::new(); + let encoder = prometheus::TextEncoder::new(); + let metric_families = REGISTRY.gather(); + if let Err(err) = encoder.encode(&metric_families, &mut buf1) { + error!("failed to encode custom metrics: {}", err); + return String::new(); + } + let mut res = String::from_utf8(buf1).unwrap_or_default(); + let mut buf2 = Vec::new(); + if let Err(err) = encoder.encode(&prometheus::gather(), &mut buf2) { + error!("failed to encode prometheus metrics: {}", err); + return String::new(); + } + res.push_str(&String::from_utf8_lossy(&buf2)); + res +} diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index c3ac5787..8f44084b 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Result; +use axum::routing::any; use axum::routing::post; use axum::{Json, Router}; use flume::Sender; @@ -21,6 +22,7 @@ use crate::config::{Config, Namespace}; use crate::controller::cluster::Controller as ClusterController; use crate::controller::{Context, Controller}; use crate::crd::Cluster; +use crate::metrics; use crate::sidecar_state::SidecarState; /// wait crd to establish timeout @@ -48,6 +50,7 @@ impl Operator { /// Return `Err` when run failed #[inline] pub async fn run(&self) -> Result<()> { + metrics::init(); let kube_client: Client = Client::try_default().await?; self.prepare_crd(&kube_client).await?; let (cluster_api, pod_api): (Api, Api) = match self.config.namespace { @@ -188,14 +191,16 @@ impl Operator { /// Run a server that receive sidecar operators' status async fn web_server(&self, status_tx: Sender) -> Result<()> { - let status = Router::new().route( - "/status", - post(|body: Json| async move { - if let Err(e) = 
status_tx.send(body.0) { - error!("channel send error: {e}"); - } - }), - ); + let status = Router::new() + .route( + "/status", + post(|body: Json| async move { + if let Err(e) = status_tx.send(body.0) { + error!("channel send error: {e}"); + } + }), + ) + .route("/metrics", any(metrics::metrics)); axum::Server::bind(&self.config.listen_addr.parse()?) .serve(status.into_make_service()) From 7d64d9f32cd4cdaea94dc39d7b63b29d7fcd33a7 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Wed, 16 Aug 2023 10:57:31 +0800 Subject: [PATCH 02/11] refactor: move metrics into cluster controller Signed-off-by: iGxnon --- operator-k8s/src/controller/cluster/mod.rs | 72 +++++++++++++++++++ .../src/controller/cluster/v1alpha.rs | 16 +++++ .../src/controller/cluster/v1alpha1.rs | 16 +++++ operator-k8s/src/controller/mod.rs | 5 -- operator-k8s/src/lib.rs | 4 +- operator-k8s/src/metrics.rs | 67 ----------------- operator-k8s/src/operator.rs | 38 +++++----- operator-k8s/src/router.rs | 42 +++++++++++ operator-k8s/src/sidecar_state.rs | 4 +- 9 files changed, 171 insertions(+), 93 deletions(-) delete mode 100644 operator-k8s/src/metrics.rs create mode 100644 operator-k8s/src/router.rs diff --git a/operator-k8s/src/controller/cluster/mod.rs b/operator-k8s/src/controller/cluster/mod.rs index 9649684a..bdbf367e 100644 --- a/operator-k8s/src/controller/cluster/mod.rs +++ b/operator-k8s/src/controller/cluster/mod.rs @@ -1,3 +1,9 @@ +use clippy_utilities::NumericCast; +use prometheus::{Histogram, HistogramOpts, HistogramTimer, IntCounterVec, Opts, Registry}; + +use std::iter::repeat; +use std::ops::Mul; + /// Controller v1alpha mod v1alpha; /// Controller v1alpha1 @@ -5,3 +11,69 @@ mod v1alpha1; /// Current controller of cluster pub(crate) type Controller = v1alpha::ClusterController; + +/// Cluster metrics +pub(crate) struct ClusterMetrics { + /// Reconcile duration histogram + reconcile_duration: Histogram, + /// Reconcile failed count + reconcile_failed_count: IntCounterVec, +} + +/// Label error +trait LabelError { + /// Label + fn label(&self) -> &str; +} + +impl ClusterMetrics { + /// Create a new cluster metrics + #[allow(clippy::expect_used)] + pub(crate) fn new() -> Self { + Self { + reconcile_duration: Histogram::with_opts( + HistogramOpts::new( + "operator_reconcile_duration_seconds", + "Duration of operator reconcile loop in seconds", + ) + .buckets(exponential_time_bucket(0.1, 2.0, 10)), + ) + .expect(""), + reconcile_failed_count: IntCounterVec::new( + Opts::new( + "operator_reconcile_failed_count", + "Number of failed times the operator reconcile loop has run", + ), + &["reason"], + ) + .expect("failed to create operator_reconcile_failed_count counter"), + } + } + + /// Register metrics + pub(crate) fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> { + registry.register(Box::new(self.reconcile_duration.clone()))?; + registry.register(Box::new(self.reconcile_failed_count.clone())) + } + + /// Record duration + fn record_duration(&self) -> HistogramTimer { + self.reconcile_duration.start_timer() + } + + /// Increment failed count + fn incr_failed_count(&self, reason: &impl LabelError) { + self.reconcile_failed_count + .with_label_values(&[reason.label()]) + .inc(); + } +} + +/// Returns a vector of time buckets for the reconcile duration histogram. 
+fn exponential_time_bucket(start: f64, factor: f64, count: usize) -> Vec { + repeat(factor) + .enumerate() + .take(count) + .map(|(i, f)| start.mul(f.powi(i.numeric_cast()))) + .collect::>() +} diff --git a/operator-k8s/src/controller/cluster/v1alpha.rs b/operator-k8s/src/controller/cluster/v1alpha.rs index ccad3ce3..dbc47445 100644 --- a/operator-k8s/src/controller/cluster/v1alpha.rs +++ b/operator-k8s/src/controller/cluster/v1alpha.rs @@ -17,6 +17,7 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; +use crate::controller::cluster::{ClusterMetrics, LabelError}; use crate::controller::consts::{ DATA_EMPTY_DIR_NAME, DEFAULT_XLINE_PORT, FIELD_MANAGER, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; @@ -29,6 +30,8 @@ pub(crate) struct ClusterController { pub(crate) kube_client: Client, /// The kubernetes cluster dns suffix pub(crate) cluster_suffix: String, + /// Cluster metrics + pub(crate) metrics: ClusterMetrics, } /// All possible errors @@ -48,6 +51,17 @@ pub(crate) enum Error { InvalidVolumeName(&'static str), } +impl LabelError for Error { + fn label(&self) -> &str { + match *self { + Self::MissingObject(_) => "missing_object", + Self::Kube(_) => "kube", + Self::CannotMount(_) => "cannot_mount", + Self::InvalidVolumeName(_) => "invalid_volume_name", + } + } +} + /// Controller result type Result = std::result::Result; @@ -370,6 +384,7 @@ impl Controller for ClusterController { type Error = Error; async fn reconcile_once(&self, cluster: &Arc) -> Result<()> { + let _timer = self.metrics.record_duration(); debug!( "Reconciling cluster: \n{}", serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() @@ -388,6 +403,7 @@ impl Controller for ClusterController { } fn handle_error(&self, resource: &Arc, err: &Self::Error) { + self.metrics.incr_failed_count(err); error!("{:?} reconciliation error: {}", resource.metadata.name, err); } } diff --git a/operator-k8s/src/controller/cluster/v1alpha1.rs b/operator-k8s/src/controller/cluster/v1alpha1.rs index 332c595a..849002ff 100644 --- a/operator-k8s/src/controller/cluster/v1alpha1.rs +++ b/operator-k8s/src/controller/cluster/v1alpha1.rs @@ -18,6 +18,7 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; +use crate::controller::cluster::{ClusterMetrics, LabelError}; use crate::controller::consts::{ CRONJOB_IMAGE, DATA_EMPTY_DIR_NAME, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, FIELD_MANAGER, SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, @@ -31,6 +32,8 @@ pub(crate) struct ClusterController { pub(crate) kube_client: Client, /// The kubernetes cluster dns suffix pub(crate) cluster_suffix: String, + /// Cluster metrics + pub(crate) metrics: ClusterMetrics, } /// All possible errors @@ -50,6 +53,17 @@ pub(crate) enum Error { InvalidVolumeName(&'static str), } +impl LabelError for Error { + fn label(&self) -> &str { + match *self { + Self::MissingObject(_) => "missing_object", + Self::Kube(_) => "kube", + Self::CannotMount(_) => "cannot_mount", + Self::InvalidVolumeName(_) => "invalid_volume_name", + } + } +} + /// Controller result type Result = std::result::Result; @@ -457,6 +471,7 @@ impl Controller for ClusterController { type Error = Error; async fn reconcile_once(&self, cluster: &Arc) -> Result<()> { + let _timer = self.metrics.record_duration(); debug!( "Reconciling cluster: \n{}", serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() @@ -487,6 +502,7 @@ 
impl Controller for ClusterController { } fn handle_error(&self, resource: &Arc, err: &Self::Error) { + self.metrics.incr_failed_count(err); error!("{:?} reconciliation error: {}", resource.metadata.name, err); } } diff --git a/operator-k8s/src/controller/mod.rs b/operator-k8s/src/controller/mod.rs index 11dfa1d9..50766167 100644 --- a/operator-k8s/src/controller/mod.rs +++ b/operator-k8s/src/controller/mod.rs @@ -11,7 +11,6 @@ use kube::{Api, Resource}; use serde::de::DeserializeOwned; use crate::controller::consts::DEFAULT_REQUEUE_DURATION; -use crate::metrics::{RECONCILE_DURATION, RECONCILE_FAILED_COUNT}; /// Cluster controller pub(crate) mod cluster; @@ -49,7 +48,6 @@ where /// The reconcile function used in kube::runtime::Controller async fn reconcile(resource: Arc, ctx: Arc>) -> Result { - let _timer = RECONCILE_DURATION.start_timer(); let controller = &ctx.controller; controller.reconcile_once(&resource).await?; Ok(Action::requeue(DEFAULT_REQUEUE_DURATION)) @@ -57,9 +55,6 @@ where /// The on_error function used in kube::runtime::Controller fn on_error(resource: Arc, err: &Self::Error, ctx: Arc>) -> Action { - RECONCILE_FAILED_COUNT - .with_label_values(&[&err.to_string()]) - .inc(); let controller = &ctx.controller; controller.handle_error(&resource, err); Action::requeue(DEFAULT_REQUEUE_DURATION) diff --git a/operator-k8s/src/lib.rs b/operator-k8s/src/lib.rs index a6b8b486..a91212ee 100644 --- a/operator-k8s/src/lib.rs +++ b/operator-k8s/src/lib.rs @@ -155,9 +155,9 @@ pub mod config; mod controller; /// Custom resource definition mod crd; -/// Xline operator metrics -mod metrics; /// Xline operator pub mod operator; +/// Xline operator web server router +mod router; /// Maintain the state of sidecar operators mod sidecar_state; diff --git a/operator-k8s/src/metrics.rs b/operator-k8s/src/metrics.rs deleted file mode 100644 index c315b042..00000000 --- a/operator-k8s/src/metrics.rs +++ /dev/null @@ -1,67 +0,0 @@ -#![allow(clippy::expect_used)] // it is safe to unwrap static metrics - -use clippy_utilities::NumericCast; -use lazy_static::lazy_static; -use prometheus::{Encoder, Histogram, HistogramOpts, IntCounterVec, Opts, Registry}; -use std::iter::repeat; -use std::ops::Mul; -use tracing::error; - -/// Returns a vector of time buckets for the reconcile duration histogram. -fn exponential_time_bucket(start: f64, factor: f64, count: usize) -> Vec { - repeat(factor) - .enumerate() - .take(count) - .map(|(i, f)| start.mul(f.powi(i.numeric_cast()))) - .collect::>() -} - -lazy_static! 
{ - pub(crate) static ref REGISTRY: Registry = Registry::new(); - pub(crate) static ref RECONCILE_DURATION: Histogram = Histogram::with_opts( - HistogramOpts::new( - "operator_reconcile_duration_seconds", - "Duration of operator reconcile loop in seconds", - ) - .buckets(exponential_time_bucket(0.1, 2.0, 10)) - ) - .expect("failed to create operator_reconcile_duration_seconds histogram"); - pub(crate) static ref RECONCILE_FAILED_COUNT: IntCounterVec = IntCounterVec::new( - Opts::new( - "operator_reconcile_failed_count", - "Number of failed times the operator reconcile loop has run" - ), - &["reason"] - ) - .expect("failed to create operator_reconcile_failed_count counter"); -} - -/// init metrics -pub(crate) fn init() { - REGISTRY - .register(Box::new(RECONCILE_DURATION.clone())) - .expect("failed to register operator_reconcile_duration_seconds histogram"); - REGISTRY - .register(Box::new(RECONCILE_FAILED_COUNT.clone())) - .expect("failed to register operator_reconcile_failed_count counter"); -} - -/// metrics handler -#[allow(clippy::unused_async)] // require by axum -pub(crate) async fn metrics() -> String { - let mut buf1 = Vec::new(); - let encoder = prometheus::TextEncoder::new(); - let metric_families = REGISTRY.gather(); - if let Err(err) = encoder.encode(&metric_families, &mut buf1) { - error!("failed to encode custom metrics: {}", err); - return String::new(); - } - let mut res = String::from_utf8(buf1).unwrap_or_default(); - let mut buf2 = Vec::new(); - if let Err(err) = encoder.encode(&prometheus::gather(), &mut buf2) { - error!("failed to encode prometheus metrics: {}", err); - return String::new(); - } - res.push_str(&String::from_utf8_lossy(&buf2)); - res -} diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index 8f44084b..51f01e71 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -5,7 +5,7 @@ use std::time::Duration; use anyhow::Result; use axum::routing::any; use axum::routing::post; -use axum::{Json, Router}; +use axum::{Extension, Router}; use flume::Sender; use futures::FutureExt; use k8s_openapi::api::core::v1::Pod; @@ -14,15 +14,16 @@ use kube::api::{ListParams, Patch, PatchParams, PostParams}; use kube::runtime::wait::{await_condition, conditions}; use kube::{Api, Client, CustomResourceExt, Resource}; use operator_api::HeartbeatStatus; +use prometheus::Registry; use tokio::signal; -use tracing::{debug, error, info, warn}; +use tracing::{debug, info, warn}; use utils::migration::ApiVersion; use crate::config::{Config, Namespace}; -use crate::controller::cluster::Controller as ClusterController; +use crate::controller::cluster::{ClusterMetrics, Controller as ClusterController}; use crate::controller::{Context, Controller}; use crate::crd::Cluster; -use crate::metrics; +use crate::router::{healthz, metrics, sidecar_state}; use crate::sidecar_state::SidecarState; /// wait crd to establish timeout @@ -50,7 +51,6 @@ impl Operator { /// Return `Err` when run failed #[inline] pub async fn run(&self) -> Result<()> { - metrics::init(); let kube_client: Client = Client::try_default().await?; self.prepare_crd(&kube_client).await?; let (cluster_api, pod_api): (Api, Api) = match self.config.namespace { @@ -72,8 +72,6 @@ impl Operator { let _ctrl_c_c = tokio::signal::ctrl_c().await; }; - let web_server = self.web_server(status_tx); - let state_update_task = SidecarState::new( status_rx, self.config.heartbeat_period, @@ -83,12 +81,18 @@ impl Operator { ) .run_with_graceful_shutdown(graceful_shutdown_event.listen()); + let metrics = 
ClusterMetrics::new(); + let registry = Registry::new(); + metrics.register(®istry)?; let ctx = Arc::new(Context::new(ClusterController { kube_client, cluster_suffix: self.config.cluster_suffix.clone(), + metrics, })); let mut controller = ClusterController::run(ctx, cluster_api); + let web_server = self.web_server(status_tx, registry); + tokio::pin!(forceful_shutdown); tokio::pin!(web_server); tokio::pin!(state_update_task); @@ -190,17 +194,17 @@ impl Operator { } /// Run a server that receive sidecar operators' status - async fn web_server(&self, status_tx: Sender) -> Result<()> { + async fn web_server( + &self, + status_tx: Sender, + registry: Registry, + ) -> Result<()> { let status = Router::new() - .route( - "/status", - post(|body: Json| async move { - if let Err(e) = status_tx.send(body.0) { - error!("channel send error: {e}"); - } - }), - ) - .route("/metrics", any(metrics::metrics)); + .route("/status", post(sidecar_state)) + .route("/metrics", any(metrics)) + .route("/healthz", any(healthz)) + .layer(Extension(status_tx)) + .layer(Extension(registry)); axum::Server::bind(&self.config.listen_addr.parse()?) .serve(status.into_make_service()) diff --git a/operator-k8s/src/router.rs b/operator-k8s/src/router.rs new file mode 100644 index 00000000..41436a56 --- /dev/null +++ b/operator-k8s/src/router.rs @@ -0,0 +1,42 @@ +use axum::{Extension, Json}; +use flume::Sender; +use operator_api::HeartbeatStatus; +use prometheus::{Encoder, Registry}; +use tracing::error; + +/// metrics handler +#[allow(clippy::unused_async)] // require by axum +pub(crate) async fn metrics(Extension(registry): Extension) -> String { + let mut buf1 = Vec::new(); + let encoder = prometheus::TextEncoder::new(); + let metric_families = registry.gather(); + if let Err(err) = encoder.encode(&metric_families, &mut buf1) { + error!("failed to encode custom metrics: {}", err); + return String::new(); + } + let mut res = String::from_utf8(buf1).unwrap_or_default(); + let mut buf2 = Vec::new(); + if let Err(err) = encoder.encode(&prometheus::gather(), &mut buf2) { + error!("failed to encode prometheus metrics: {}", err); + return String::new(); + } + res.push_str(&String::from_utf8_lossy(&buf2)); + res +} + +/// healthz handler +#[allow(clippy::unused_async)] // require by axum +pub(crate) async fn healthz() -> &'static str { + "healthy" +} + +/// sidecar state handler +#[allow(clippy::unused_async)] // require by axum +pub(crate) async fn sidecar_state( + Extension(status_tx): Extension>, + Json(status): Json, +) { + if let Err(e) = status_tx.send(status) { + error!("channel send error: {e}"); + } +} diff --git a/operator-k8s/src/sidecar_state.rs b/operator-k8s/src/sidecar_state.rs index a9d11198..249843ca 100644 --- a/operator-k8s/src/sidecar_state.rs +++ b/operator-k8s/src/sidecar_state.rs @@ -109,9 +109,9 @@ impl SidecarState { } let _ignore = self.unreachable.insert(name.clone(), 0); } - // If recoverd, remove it from the cache + // If recovered, remove it from the cache } else if self.unreachable.remove(name).is_some() { - debug!("operator {name} recoverd"); + debug!("operator {name} recovered"); } else { debug!("operator {name} online"); } From b68d5d10bf972ae3f6c2c1a40092359c165284f3 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Thu, 7 Sep 2023 21:40:41 +0800 Subject: [PATCH 03/11] refactor: add Metrics trait Signed-off-by: iGxnon --- operator-k8s/src/controller/cluster/mod.rs | 50 ++++++++++--------- .../src/controller/cluster/v1alpha.rs | 23 +++++---- .../src/controller/cluster/v1alpha1.rs | 23 +++++---- 
operator-k8s/src/controller/mod.rs | 34 ++++++++++++- operator-k8s/src/operator.rs | 2 +- 5 files changed, 86 insertions(+), 46 deletions(-) diff --git a/operator-k8s/src/controller/cluster/mod.rs b/operator-k8s/src/controller/cluster/mod.rs index bdbf367e..ef8e33c8 100644 --- a/operator-k8s/src/controller/cluster/mod.rs +++ b/operator-k8s/src/controller/cluster/mod.rs @@ -1,9 +1,11 @@ use clippy_utilities::NumericCast; -use prometheus::{Histogram, HistogramOpts, HistogramTimer, IntCounterVec, Opts, Registry}; +use prometheus::{Error, Histogram, HistogramOpts, HistogramTimer, IntCounterVec, Opts, Registry}; use std::iter::repeat; use std::ops::Mul; +use crate::controller::Metrics; + /// Controller v1alpha mod v1alpha; /// Controller v1alpha1 @@ -20,10 +22,28 @@ pub(crate) struct ClusterMetrics { reconcile_failed_count: IntCounterVec, } -/// Label error -trait LabelError { - /// Label - fn label(&self) -> &str; +impl Default for ClusterMetrics { + fn default() -> Self { + Self::new() + } +} + +impl Metrics for ClusterMetrics { + /// Register metrics + fn register(&self, registry: &Registry) -> Result<(), Error> { + registry.register(Box::new(self.reconcile_duration.clone()))?; + registry.register(Box::new(self.reconcile_failed_count.clone())) + } + + /// Record duration + fn record_duration(&self) -> HistogramTimer { + self.reconcile_duration.start_timer() + } + + /// Increment failed count + fn record_failed_count(&self, labels: &[&str]) { + self.reconcile_failed_count.with_label_values(labels).inc(); + } } impl ClusterMetrics { @@ -38,7 +58,7 @@ impl ClusterMetrics { ) .buckets(exponential_time_bucket(0.1, 2.0, 10)), ) - .expect(""), + .expect("failed to create operator_reconcile_duration_seconds histogram"), reconcile_failed_count: IntCounterVec::new( Opts::new( "operator_reconcile_failed_count", @@ -49,24 +69,6 @@ impl ClusterMetrics { .expect("failed to create operator_reconcile_failed_count counter"), } } - - /// Register metrics - pub(crate) fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> { - registry.register(Box::new(self.reconcile_duration.clone()))?; - registry.register(Box::new(self.reconcile_failed_count.clone())) - } - - /// Record duration - fn record_duration(&self) -> HistogramTimer { - self.reconcile_duration.start_timer() - } - - /// Increment failed count - fn incr_failed_count(&self, reason: &impl LabelError) { - self.reconcile_failed_count - .with_label_values(&[reason.label()]) - .inc(); - } } /// Returns a vector of time buckets for the reconcile duration histogram. 
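[Editor's note] For reference, the bucket helper kept in cluster/mod.rs above, `exponential_time_bucket(0.1, 2.0, 10)`, produces ten doubling buckets starting at 0.1s, which is what the `HistogramOpts` for `operator_reconcile_duration_seconds` uses. A minimal standalone sketch of the same arithmetic (illustrative only, not part of this patch; the inline computation stands in for the helper):

fn main() {
    // start * factor^i for i in 0..count, as in exponential_time_bucket(0.1, 2.0, 10)
    let (start, factor, count) = (0.1_f64, 2.0_f64, 10);
    let buckets: Vec<f64> = (0..count).map(|i| start * factor.powi(i)).collect();
    // [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2] seconds
    assert_eq!(buckets.len(), 10);
    assert!((buckets[0] - 0.1).abs() < 1e-12);
    assert!((buckets[9] - 51.2).abs() < 1e-9);
    println!("{buckets:?}");
}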
diff --git a/operator-k8s/src/controller/cluster/v1alpha.rs b/operator-k8s/src/controller/cluster/v1alpha.rs index dbc47445..f718ec5f 100644 --- a/operator-k8s/src/controller/cluster/v1alpha.rs +++ b/operator-k8s/src/controller/cluster/v1alpha.rs @@ -17,11 +17,11 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; -use crate::controller::cluster::{ClusterMetrics, LabelError}; +use crate::controller::cluster::ClusterMetrics; use crate::controller::consts::{ DATA_EMPTY_DIR_NAME, DEFAULT_XLINE_PORT, FIELD_MANAGER, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; -use crate::controller::Controller; +use crate::controller::{Controller, MetricsLabeled}; use crate::crd::v1alpha::Cluster; /// CRD `XlineCluster` controller @@ -51,13 +51,13 @@ pub(crate) enum Error { InvalidVolumeName(&'static str), } -impl LabelError for Error { - fn label(&self) -> &str { +impl MetricsLabeled for Error { + fn labels(&self) -> Vec<&str> { match *self { - Self::MissingObject(_) => "missing_object", - Self::Kube(_) => "kube", - Self::CannotMount(_) => "cannot_mount", - Self::InvalidVolumeName(_) => "invalid_volume_name", + Self::MissingObject(_) => vec!["missing_object"], + Self::Kube(_) => vec!["kube"], + Self::CannotMount(_) => vec!["cannot_mount"], + Self::InvalidVolumeName(_) => vec!["invalid_volume_name"], } } } @@ -382,9 +382,13 @@ impl ClusterController { #[async_trait] impl Controller for ClusterController { type Error = Error; + type Metrics = ClusterMetrics; + + fn metrics(&self) -> &Self::Metrics { + &self.metrics + } async fn reconcile_once(&self, cluster: &Arc) -> Result<()> { - let _timer = self.metrics.record_duration(); debug!( "Reconciling cluster: \n{}", serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() @@ -403,7 +407,6 @@ impl Controller for ClusterController { } fn handle_error(&self, resource: &Arc, err: &Self::Error) { - self.metrics.incr_failed_count(err); error!("{:?} reconciliation error: {}", resource.metadata.name, err); } } diff --git a/operator-k8s/src/controller/cluster/v1alpha1.rs b/operator-k8s/src/controller/cluster/v1alpha1.rs index 849002ff..2286a5a6 100644 --- a/operator-k8s/src/controller/cluster/v1alpha1.rs +++ b/operator-k8s/src/controller/cluster/v1alpha1.rs @@ -18,12 +18,12 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; -use crate::controller::cluster::{ClusterMetrics, LabelError}; +use crate::controller::cluster::ClusterMetrics; use crate::controller::consts::{ CRONJOB_IMAGE, DATA_EMPTY_DIR_NAME, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, FIELD_MANAGER, SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; -use crate::controller::Controller; +use crate::controller::{Controller, MetricsLabeled}; use crate::crd::v1alpha1::{Cluster, StorageSpec}; /// CRD `XlineCluster` controller @@ -53,13 +53,13 @@ pub(crate) enum Error { InvalidVolumeName(&'static str), } -impl LabelError for Error { - fn label(&self) -> &str { +impl MetricsLabeled for Error { + fn labels(&self) -> Vec<&str> { match *self { - Self::MissingObject(_) => "missing_object", - Self::Kube(_) => "kube", - Self::CannotMount(_) => "cannot_mount", - Self::InvalidVolumeName(_) => "invalid_volume_name", + Self::MissingObject(_) => vec!["missing_object"], + Self::Kube(_) => vec!["kube"], + Self::CannotMount(_) => vec!["cannot_mount"], + Self::InvalidVolumeName(_) => vec!["invalid_volume_name"], } } } @@ -469,9 +469,13 @@ impl 
ClusterController { #[async_trait] impl Controller for ClusterController { type Error = Error; + type Metrics = ClusterMetrics; + + fn metrics(&self) -> &Self::Metrics { + &self.metrics + } async fn reconcile_once(&self, cluster: &Arc) -> Result<()> { - let _timer = self.metrics.record_duration(); debug!( "Reconciling cluster: \n{}", serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() @@ -502,7 +506,6 @@ impl Controller for ClusterController { } fn handle_error(&self, resource: &Arc, err: &Self::Error) { - self.metrics.incr_failed_count(err); error!("{:?} reconciliation error: {}", resource.metadata.name, err); } } diff --git a/operator-k8s/src/controller/mod.rs b/operator-k8s/src/controller/mod.rs index 50766167..6eedcde8 100644 --- a/operator-k8s/src/controller/mod.rs +++ b/operator-k8s/src/controller/mod.rs @@ -30,6 +30,30 @@ impl Context { } } +/// Metrics labeled +pub(crate) trait MetricsLabeled { + /// Label + #[allow(clippy::indexing_slicing)] // labels should always have at least one element + fn label(&self) -> &str { + self.labels()[0] + } + + /// Labels + fn labels(&self) -> Vec<&str>; +} + +/// The common metrics shared by all controllers +pub(crate) trait Metrics: Default { + /// Register metrics + fn register(&self, registry: &prometheus::Registry) -> Result<(), prometheus::Error>; + + /// Record duration + fn record_duration(&self) -> prometheus::HistogramTimer; + + /// Record failed count + fn record_failed_count(&self, labels: &[&str]); +} + /// The controller #[async_trait] pub(crate) trait Controller: Sized + Send + Sync + 'static @@ -38,7 +62,13 @@ where R::DynamicType: Hash + Eq + Clone + Default + Unpin + Debug, { /// The error generated by this controller - type Error: Error + Send + Sync + 'static; + type Error: MetricsLabeled + Error + Send + Sync + 'static; + + /// The metrics used by this controller + type Metrics: Metrics; + + /// Get the metrics + fn metrics(&self) -> &Self::Metrics; /// Use &self to execute a reconcile async fn reconcile_once(&self, resource: &Arc) -> Result<(), Self::Error>; @@ -49,6 +79,7 @@ where /// The reconcile function used in kube::runtime::Controller async fn reconcile(resource: Arc, ctx: Arc>) -> Result { let controller = &ctx.controller; + let _timer = controller.metrics().record_duration(); controller.reconcile_once(&resource).await?; Ok(Action::requeue(DEFAULT_REQUEUE_DURATION)) } @@ -56,6 +87,7 @@ where /// The on_error function used in kube::runtime::Controller fn on_error(resource: Arc, err: &Self::Error, ctx: Arc>) -> Action { let controller = &ctx.controller; + controller.metrics().record_failed_count(&err.labels()); controller.handle_error(&resource, err); Action::requeue(DEFAULT_REQUEUE_DURATION) } diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index 51f01e71..2b09456d 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -21,7 +21,7 @@ use utils::migration::ApiVersion; use crate::config::{Config, Namespace}; use crate::controller::cluster::{ClusterMetrics, Controller as ClusterController}; -use crate::controller::{Context, Controller}; +use crate::controller::{Context, Controller, Metrics}; use crate::crd::Cluster; use crate::router::{healthz, metrics, sidecar_state}; use crate::sidecar_state::SidecarState; From 077227ef3cd9c8e7411ba3d83afd0f271aa5a557 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Fri, 18 Aug 2023 17:37:45 +0800 Subject: [PATCH 04/11] refactor: refactor controller logic Signed-off-by: iGxnon --- operator-k8s/src/{controller => }/consts.rs | 
11 +- .../src/controller/cluster/v1alpha.rs | 4 +- .../src/controller/cluster/v1alpha1.rs | 4 +- operator-k8s/src/controller/mod.rs | 4 +- operator-k8s/src/lib.rs | 6 +- operator-k8s/src/manager/cluster.rs | 353 ++++++++++++++++++ operator-k8s/src/manager/mod.rs | 2 + 7 files changed, 374 insertions(+), 10 deletions(-) rename operator-k8s/src/{controller => }/consts.rs (63%) create mode 100644 operator-k8s/src/manager/cluster.rs create mode 100644 operator-k8s/src/manager/mod.rs diff --git a/operator-k8s/src/controller/consts.rs b/operator-k8s/src/consts.rs similarity index 63% rename from operator-k8s/src/controller/consts.rs rename to operator-k8s/src/consts.rs index fb7be637..e1b3a96f 100644 --- a/operator-k8s/src/controller/consts.rs +++ b/operator-k8s/src/consts.rs @@ -1,9 +1,9 @@ use std::time::Duration; /// The default requeue duration to achieve eventual consistency -pub(super) const DEFAULT_REQUEUE_DURATION: Duration = Duration::from_secs(600); +pub(crate) const DEFAULT_REQUEUE_DURATION: Duration = Duration::from_secs(600); /// The field manager identifier of xline operator -pub(super) const FIELD_MANAGER: &str = "xlineoperator.datenlord.io"; +pub(crate) const FIELD_MANAGER: &str = "xlineoperator.datenlord.io"; /// The emptyDir volume name of each pod if there is no data pvc specified pub(crate) const DATA_EMPTY_DIR_NAME: &str = "xline-data-empty-dir"; /// The image used for cronjob to trigger backup @@ -21,3 +21,10 @@ pub(crate) const DEFAULT_XLINE_PORT: i32 = 2379; pub(crate) const DEFAULT_SIDECAR_PORT: i32 = 2380; /// The environment name of the xline pod name pub(crate) const XLINE_POD_NAME_ENV: &str = "XLINE_POD_NAME"; +/// The annotation used to inherit labels in `XlineCluster` +pub(crate) const ANNOTATION_INHERIT_LABELS_PREFIX: &str = + "xlineoperator.datenlord.io/inherit-label-prefix"; +/// The label attach to subresources, indicate the xlinecluster name +pub(crate) const LABEL_CLUSTER_NAME: &str = "xlinecluster/name"; +/// The label attach to subresources, indicate the component type of this subresource +pub(crate) const LABEL_CLUSTER_COMPONENT: &str = "xlinecluster/component"; diff --git a/operator-k8s/src/controller/cluster/v1alpha.rs b/operator-k8s/src/controller/cluster/v1alpha.rs index f718ec5f..49e76300 100644 --- a/operator-k8s/src/controller/cluster/v1alpha.rs +++ b/operator-k8s/src/controller/cluster/v1alpha.rs @@ -17,10 +17,10 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; -use crate::controller::cluster::ClusterMetrics; -use crate::controller::consts::{ +use crate::consts::{ DATA_EMPTY_DIR_NAME, DEFAULT_XLINE_PORT, FIELD_MANAGER, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; +use crate::controller::cluster::ClusterMetrics; use crate::controller::{Controller, MetricsLabeled}; use crate::crd::v1alpha::Cluster; diff --git a/operator-k8s/src/controller/cluster/v1alpha1.rs b/operator-k8s/src/controller/cluster/v1alpha1.rs index 2286a5a6..7458937e 100644 --- a/operator-k8s/src/controller/cluster/v1alpha1.rs +++ b/operator-k8s/src/controller/cluster/v1alpha1.rs @@ -18,11 +18,11 @@ use kube::{Api, Client, Resource, ResourceExt}; use tracing::{debug, error}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; -use crate::controller::cluster::ClusterMetrics; -use crate::controller::consts::{ +use crate::consts::{ CRONJOB_IMAGE, DATA_EMPTY_DIR_NAME, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, FIELD_MANAGER, SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; +use 
crate::controller::cluster::ClusterMetrics; use crate::controller::{Controller, MetricsLabeled}; use crate::crd::v1alpha1::{Cluster, StorageSpec}; diff --git a/operator-k8s/src/controller/mod.rs b/operator-k8s/src/controller/mod.rs index 6eedcde8..8d8b4d03 100644 --- a/operator-k8s/src/controller/mod.rs +++ b/operator-k8s/src/controller/mod.rs @@ -10,12 +10,10 @@ use kube::runtime::watcher::Config as WatcherConfig; use kube::{Api, Resource}; use serde::de::DeserializeOwned; -use crate::controller::consts::DEFAULT_REQUEUE_DURATION; +use crate::consts::DEFAULT_REQUEUE_DURATION; /// Cluster controller pub(crate) mod cluster; -/// CRD constants -mod consts; /// The common context pub(crate) struct Context { diff --git a/operator-k8s/src/lib.rs b/operator-k8s/src/lib.rs index a91212ee..60c93131 100644 --- a/operator-k8s/src/lib.rs +++ b/operator-k8s/src/lib.rs @@ -151,10 +151,14 @@ /// Xline operator config pub mod config; -/// Xline operator controller +/// Some constants +mod consts; +/// Custom resource controller mod controller; /// Custom resource definition mod crd; +/// Custom resource manager +mod manager; /// Xline operator pub mod operator; /// Xline operator web server router diff --git a/operator-k8s/src/manager/cluster.rs b/operator-k8s/src/manager/cluster.rs new file mode 100644 index 00000000..a716aa9f --- /dev/null +++ b/operator-k8s/src/manager/cluster.rs @@ -0,0 +1,353 @@ +#![allow(unused)] // remove when implemented + +use crate::consts::{ + ANNOTATION_INHERIT_LABELS_PREFIX, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, + LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, SIDECAR_PORT_NAME, XLINE_PORT_NAME, +}; +use crate::crd::v1alpha1::{Cluster, StorageSpec}; + +use std::collections::BTreeMap; +use std::sync::Arc; + +use k8s_openapi::api::core::v1::{ + Container, ContainerPort, GRPCAction, PersistentVolumeClaim, PersistentVolumeClaimVolumeSource, + Pod, PodSpec, PodTemplateSpec, Probe, Service, ServicePort, ServiceSpec, Volume, VolumeMount, +}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference}; +use kube::{Resource, ResourceExt}; +use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; + +/// Read objects from `XlineCluster` +pub(crate) struct Extractor<'a> { + /// `XlineCluster` + cluster: &'a Cluster, +} + +/// The component of `XlineCluster` +#[derive(Copy, Clone)] +pub(crate) enum Component { + /// A xline node + Node, + /// A service + Service, + /// A backup job + BackupJob, +} + +impl Component { + /// Get the component name + fn label(&self) -> &str { + match *self { + Component::Node => "node", + Component::Service => "srv", + Component::BackupJob => "job", + } + } +} + +impl<'a> Extractor<'a> { + /// Constructor + pub(crate) fn new(cluster: &'a Cluster) -> Self { + Self { cluster } + } + + /// Extract the exposed ports in `XlineCluster` + /// Return the xline port, sidecar port, and a list of service ports + /// gathered from all exposed ports, which will be used to build a `Service`. + /// If the `XlineCluster` does not specified the xline ports (a port with name 'xline') or + /// the sidecar ports (a port with name 'sidecar'), the default port (xline: 2379, sidecar: 2380) + /// will be used. 
+ fn extract_ports(&self) -> (ContainerPort, ContainerPort, Vec) { + // expose all the container's ports + let mut xline_port = None; + let mut sidecar_port = None; + let container_ports = self + .cluster + .spec + .container + .ports + .clone() + .unwrap_or_default(); + let mut service_ports: Vec<_> = container_ports + .into_iter() + .map(|port| { + // the port with name `xline` is considered to be the port of xline + if matches!(port.name.as_deref(), Some(XLINE_PORT_NAME)) { + xline_port = Some(port.clone()); + } + // the port with name `sidecar` is considered to be the port of xline + if matches!(port.name.as_deref(), Some(SIDECAR_PORT_NAME)) { + sidecar_port = Some(port.clone()); + } + ServicePort { + name: port.name.clone(), + port: port.container_port, + ..ServicePort::default() + } + }) + .collect(); + if xline_port.is_none() { + // add default xline port 2379 to service port if xline port is not specified + service_ports.push(ServicePort { + name: Some(XLINE_PORT_NAME.to_owned()), + port: DEFAULT_XLINE_PORT, + ..ServicePort::default() + }); + } + if sidecar_port.is_none() { + // add default sidecar port 2380 to service port if sidecar port is not specified + service_ports.push(ServicePort { + name: Some(SIDECAR_PORT_NAME.to_owned()), + port: DEFAULT_SIDECAR_PORT, + ..ServicePort::default() + }); + } + // if it is not specified, 2379 is used as xline port + let xline_port = xline_port.unwrap_or(ContainerPort { + name: Some(XLINE_PORT_NAME.to_owned()), + container_port: DEFAULT_XLINE_PORT, + ..ContainerPort::default() + }); + // if it is not specified, 2380 is used as sidecar port + let sidecar_port = sidecar_port.unwrap_or(ContainerPort { + name: Some(SIDECAR_PORT_NAME.to_owned()), + container_port: DEFAULT_SIDECAR_PORT, + ..ContainerPort::default() + }); + (xline_port, sidecar_port, service_ports) + } + + /// Extract all PVC templates + /// The PVC template is used to create PVC for every pod + fn extract_pvc_template(&self) -> Vec { + self.cluster + .spec + .backup + .iter() + .filter_map(|spec| { + if let StorageSpec::Pvc { pvc } = spec.storage.clone() { + Some(pvc) + } else { + None + } + }) + .chain(self.cluster.spec.data.iter().cloned()) + .chain(self.cluster.spec.pvcs.iter().flatten().cloned()) + .collect() + } + + /// Extract volume mount for backup and data pvc + /// Other pvc should be mounted by user + fn extract_additional_volume_mount(&self) -> Vec { + self.cluster + .spec + .backup + .iter() + .filter_map(|spec| { + if let StorageSpec::Pvc { pvc } = spec.storage.clone() { + Some(pvc) + } else { + None + } + }) + .map(|pvc| VolumeMount { + name: pvc.name_any(), // because the volume name is the same as pvc template name, we can use it in volume mount + mount_path: DEFAULT_BACKUP_DIR.to_owned(), + ..VolumeMount::default() + }) + .chain( + self.cluster + .spec + .data + .iter() + .cloned() + .map(|pvc| VolumeMount { + name: pvc.name_any(), + mount_path: DEFAULT_DATA_DIR.to_owned(), + ..VolumeMount::default() + }), + ) + .collect() + } + + /// Extract owner reference + fn extract_owner_ref(&self) -> OwnerReference { + // unwrap controller_owner_ref is always safe + let Some(owner_ref) = self.cluster.controller_owner_ref(&()) else { unreachable!("kube-runtime has undergone some changes.") }; + owner_ref + } + + /// Extract name, namespace + #[allow(clippy::expect_used)] // it is ok because xlinecluster has field validation + fn extract_id(&self) -> (&str, &str) { + let namespace = self + .cluster + .metadata + .namespace + .as_deref() + .expect("xlinecluster resource should 
have a namespace"); + let name = self + .cluster + .metadata + .name + .as_deref() + .expect("xlinecluster resource should have a name"); + (namespace, name) + } + + /// Extract inherit labels + fn extract_inherit_labels(&self) -> BTreeMap { + let Some(prefix) = self + .cluster + .metadata + .annotations + .as_ref() + .and_then(|annotations| annotations.get(ANNOTATION_INHERIT_LABELS_PREFIX)) else { return BTreeMap::new() }; + let prefix: Vec<_> = prefix + .split(',') + .map(str::trim) + .filter(|p| !p.is_empty()) + .collect(); + let Some(labels) = self.cluster.metadata.labels.as_ref() else { return BTreeMap::new() }; + labels + .iter() + .filter_map(|(l, v)| { + prefix + .iter() + .find(|p| l.starts_with(*p)) + .and(Some((l.clone(), v.clone()))) + }) + .collect() + } +} + +/// Factory generate the objects in k8s +pub(crate) struct Factory { + /// The kubernetes cluster dns suffix + cluster_suffix: String, + /// `XlineCluster` + cluster: Arc, +} + +impl Factory { + /// Constructor + pub(crate) fn new(cluster: Arc, cluster_suffix: &str) -> Self { + Self { + cluster_suffix: cluster_suffix.to_owned(), + cluster, + } + } + + /// Get the full component name + fn component_name(cluster_name: &str, component: Component) -> String { + format!("{cluster_name}-{}", component.label()) + } + + /// Get the general metadata + fn general_metadata(&self, component: Component) -> ObjectMeta { + let extractor = Extractor::new(self.cluster.as_ref()); + let mut labels = extractor.extract_inherit_labels(); + let (name, namespace) = extractor.extract_id(); + let owner_ref = extractor.extract_owner_ref(); + let _ig = labels.insert(LABEL_CLUSTER_NAME.to_owned(), name.to_owned()); + let __ig = labels.insert( + LABEL_CLUSTER_COMPONENT.to_owned(), + component.label().to_owned(), + ); + ObjectMeta { + labels: Some(labels), // it is used in selector + name: Some(Self::component_name(name, component)), // all subresources share the same name + namespace: Some(namespace.to_owned()), // all subresources share the same namespace + owner_references: Some(vec![owner_ref]), // allow k8s GC to automatically clean up itself + ..ObjectMeta::default() + } + } + + /// Get the node headless service + fn node_service(&self) -> Service { + let extractor = Extractor::new(self.cluster.as_ref()); + let (_, _, service_ports) = extractor.extract_ports(); + let (name, _) = extractor.extract_id(); + Service { + metadata: self.general_metadata(Component::Service), + spec: Some(ServiceSpec { + cluster_ip: None, + ports: Some(service_ports), + selector: Some( + [ + (LABEL_CLUSTER_NAME.to_owned(), name.to_owned()), + ( + LABEL_CLUSTER_COMPONENT.to_owned(), + Component::Node.label().to_owned(), + ), + ] + .into(), + ), + ..ServiceSpec::default() + }), + ..Service::default() + } + } + + /// Mount the additional volumes on the container + #[allow(clippy::unused_self)] + fn mount_volume_on_container(&self, container: &mut Container) { + let extractor = Extractor::new(self.cluster.as_ref()); + let volume_mount = extractor.extract_additional_volume_mount(); + container + .volume_mounts + .get_or_insert(vec![]) + .extend(volume_mount); + } + + /// Get the xline container + fn xline_container(&self) -> Container { + let mut container = self.cluster.spec.container.clone(); + self.mount_volume_on_container(&mut container); + container + } + + /// Get the node pod + fn node_pod(&self, index: usize) -> PodTemplateSpec { + let extractor = Extractor::new(self.cluster.as_ref()); + let (name, _) = extractor.extract_id(); + let node_name = format!("{}-{index}", 
Self::component_name(name, Component::Node)); + let xline = self.xline_container(); + let volumes = extractor + .extract_pvc_template() + .into_iter() + .map(|pvc_template| Volume { + name: pvc_template.name_any(), // the volume name is the same as pvc template name + persistent_volume_claim: Some(PersistentVolumeClaimVolumeSource { + claim_name: format!("{}-{}", pvc_template.name_any(), node_name), // the pvc detail name is template name + node name + ..PersistentVolumeClaimVolumeSource::default() + }), + ..Volume::default() + }) + .collect(); + let mut meta = self.general_metadata(Component::Node); + meta.name = Some(node_name); + PodTemplateSpec { + metadata: Some(meta), + spec: Some(PodSpec { + init_containers: Some(vec![]), + containers: vec![xline], + affinity: self.cluster.spec.affinity.clone(), + volumes: Some(volumes), + ..PodSpec::default() + }), + } + } + + /// Get the pvc for a node pod + fn pvc(&self, index: usize) -> Vec { + let extractor = Extractor::new(self.cluster.as_ref()); + let mut pvcs = extractor.extract_pvc_template(); + let (name, _) = extractor.extract_id(); + let node_name = format!("{}-{index}", Self::component_name(name, Component::Node)); + for pvc in &mut pvcs { + pvc.metadata.name = Some(format!("{}-{}", pvc.name_any(), node_name)); + } + pvcs + } +} diff --git a/operator-k8s/src/manager/mod.rs b/operator-k8s/src/manager/mod.rs new file mode 100644 index 00000000..d273d9a2 --- /dev/null +++ b/operator-k8s/src/manager/mod.rs @@ -0,0 +1,2 @@ +/// `XlineCluster` manager +mod cluster; From 8263b917cc44f893ed25c4bef65c9f00709b09b4 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Fri, 18 Aug 2023 22:43:12 +0800 Subject: [PATCH 05/11] refactor: set command to container Signed-off-by: iGxnon --- operator-k8s/src/manager/cluster.rs | 51 ++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/operator-k8s/src/manager/cluster.rs b/operator-k8s/src/manager/cluster.rs index a716aa9f..5947703b 100644 --- a/operator-k8s/src/manager/cluster.rs +++ b/operator-k8s/src/manager/cluster.rs @@ -2,7 +2,8 @@ use crate::consts::{ ANNOTATION_INHERIT_LABELS_PREFIX, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, - LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, SIDECAR_PORT_NAME, XLINE_PORT_NAME, + LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, + XLINE_PORT_NAME, }; use crate::crd::v1alpha1::{Cluster, StorageSpec}; @@ -10,8 +11,9 @@ use std::collections::BTreeMap; use std::sync::Arc; use k8s_openapi::api::core::v1::{ - Container, ContainerPort, GRPCAction, PersistentVolumeClaim, PersistentVolumeClaimVolumeSource, - Pod, PodSpec, PodTemplateSpec, Probe, Service, ServicePort, ServiceSpec, Volume, VolumeMount, + Container, ContainerPort, EnvVar, EnvVarSource, GRPCAction, ObjectFieldSelector, + PersistentVolumeClaim, PersistentVolumeClaimVolumeSource, Pod, PodSpec, PodTemplateSpec, Probe, + Service, ServicePort, ServiceSpec, Volume, VolumeMount, }; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference}; use kube::{Resource, ResourceExt}; @@ -300,10 +302,49 @@ impl Factory { .extend(volume_mount); } + /// Set the entrypoint of the container + fn set_command(&self, container: &mut Container, index: usize) { + let extractor = Extractor::new(self.cluster.as_ref()); + let (name, namespace) = extractor.extract_id(); + let (xline_port, _, _) = extractor.extract_ports(); + let srv_name = Self::component_name(name, Component::Service); + let mut members = vec![]; + // the node before this index has already been 
added to the members + // we use the members from 0 to index to build the initial cluster config for this node + // and then do membership change to update the cluster config + for i in 0..=index { + let node_name = format!("{}-{i}", Self::component_name(name, Component::Node)); + members.push(format!( + "{node_name}={node_name}.{srv_name}.{namespace}.svc.{}:{}", + self.cluster_suffix, xline_port.container_port + )); + } + // TODO add additional arguments config to CRD and append to the command + let xline_cmd = format!("xline --name $({XLINE_POD_NAME_ENV}) --storage-engine rocksdb --data-dir {DEFAULT_DATA_DIR} --members {}", members.join(",")) + .split_whitespace() + .map(ToOwned::to_owned) + .collect::>(); + // TODO we need a sidecar systemd process to take care of xline + container.command = Some(xline_cmd); + } + /// Get the xline container - fn xline_container(&self) -> Container { + fn xline_container(&self, index: usize) -> Container { let mut container = self.cluster.spec.container.clone(); self.mount_volume_on_container(&mut container); + self.set_command(&mut container, index); + // we need to set the env variable to get the pod name in the container + container.env = Some(vec![EnvVar { + name: XLINE_POD_NAME_ENV.to_owned(), + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "metadata.name".to_owned(), + ..ObjectFieldSelector::default() + }), + ..EnvVarSource::default() + }), + ..EnvVar::default() + }]); container } @@ -312,7 +353,7 @@ impl Factory { let extractor = Extractor::new(self.cluster.as_ref()); let (name, _) = extractor.extract_id(); let node_name = format!("{}-{index}", Self::component_name(name, Component::Node)); - let xline = self.xline_container(); + let xline = self.xline_container(index); let volumes = extractor .extract_pvc_template() .into_iter() From b54cd47908cfc803c8ed38bcf270b906818cd3d1 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Wed, 6 Sep 2023 16:33:16 +0800 Subject: [PATCH 06/11] test: add unit test for extractor Signed-off-by: iGxnon --- Cargo.lock | 5 +- operator-k8s/Cargo.toml | 1 + operator-k8s/src/consts.rs | 2 + operator-k8s/src/crd/v1alpha1/cluster.rs | 9 + operator-k8s/src/crd/v1alpha1/mod.rs | 3 +- operator-k8s/src/manager/cluster.rs | 339 ++++++++++++++++++----- 6 files changed, 289 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11bb8e85..87dba6c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2812,9 +2812,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.22" +version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85" +checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ "indexmap 2.0.0", "itoa", @@ -3907,6 +3907,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "serde_yaml", "thiserror", "tokio", "tracing", diff --git a/operator-k8s/Cargo.toml b/operator-k8s/Cargo.toml index af798cd0..01a4e18e 100644 --- a/operator-k8s/Cargo.toml +++ b/operator-k8s/Cargo.toml @@ -41,3 +41,4 @@ utils = { path = "../utils" } [dev-dependencies] garde = { version = "0.11.2", default-features = false, features = ["derive", "pattern"] } +serde_yaml = "0.9.25" diff --git a/operator-k8s/src/consts.rs b/operator-k8s/src/consts.rs index e1b3a96f..1403144f 100644 --- a/operator-k8s/src/consts.rs +++ b/operator-k8s/src/consts.rs @@ -28,3 +28,5 @@ pub(crate) const ANNOTATION_INHERIT_LABELS_PREFIX: &str = pub(crate) const 
LABEL_CLUSTER_NAME: &str = "xlinecluster/name"; /// The label attach to subresources, indicate the component type of this subresource pub(crate) const LABEL_CLUSTER_COMPONENT: &str = "xlinecluster/component"; +/// Indicate the version of operator that creates this subresource +pub(crate) const LABEL_OPERATOR_VERSION: &str = "xlinecluster/operator-version"; diff --git a/operator-k8s/src/crd/v1alpha1/cluster.rs b/operator-k8s/src/crd/v1alpha1/cluster.rs index 3bac92ee..ed840f2f 100644 --- a/operator-k8s/src/crd/v1alpha1/cluster.rs +++ b/operator-k8s/src/crd/v1alpha1/cluster.rs @@ -89,6 +89,15 @@ pub(crate) enum StorageSpec { }, } +impl StorageSpec { + pub(crate) fn as_pvc(&self) -> Option<&PersistentVolumeClaim> { + match *self { + Self::Pvc { ref pvc } => Some(pvc), + Self::S3 { .. } => None, + } + } +} + /// Xline cluster backup S3 specification #[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)] #[cfg_attr(test, derive(Validate))] diff --git a/operator-k8s/src/crd/v1alpha1/mod.rs b/operator-k8s/src/crd/v1alpha1/mod.rs index fc397c42..c2343934 100644 --- a/operator-k8s/src/crd/v1alpha1/mod.rs +++ b/operator-k8s/src/crd/v1alpha1/mod.rs @@ -1,6 +1,5 @@ #![allow(unused)] // TODO: remove when this CRD is used -pub(crate) use cluster::Cluster; -pub(crate) use cluster::StorageSpec; +pub(crate) use cluster::{BackupSpec, Cluster, ClusterSpec, StorageSpec}; mod cluster; diff --git a/operator-k8s/src/manager/cluster.rs b/operator-k8s/src/manager/cluster.rs index 5947703b..5d38ac7b 100644 --- a/operator-k8s/src/manager/cluster.rs +++ b/operator-k8s/src/manager/cluster.rs @@ -2,10 +2,10 @@ use crate::consts::{ ANNOTATION_INHERIT_LABELS_PREFIX, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, - LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, - XLINE_PORT_NAME, + LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, LABEL_OPERATOR_VERSION, SIDECAR_PORT_NAME, + XLINE_POD_NAME_ENV, XLINE_PORT_NAME, }; -use crate::crd::v1alpha1::{Cluster, StorageSpec}; +use crate::crd::v1alpha1::{BackupSpec, Cluster, ClusterSpec, StorageSpec}; use std::collections::BTreeMap; use std::sync::Arc; @@ -126,13 +126,7 @@ impl<'a> Extractor<'a> { .spec .backup .iter() - .filter_map(|spec| { - if let StorageSpec::Pvc { pvc } = spec.storage.clone() { - Some(pvc) - } else { - None - } - }) + .filter_map(|spec| spec.storage.as_pvc().cloned()) .chain(self.cluster.spec.data.iter().cloned()) .chain(self.cluster.spec.pvcs.iter().flatten().cloned()) .collect() @@ -145,13 +139,7 @@ impl<'a> Extractor<'a> { .spec .backup .iter() - .filter_map(|spec| { - if let StorageSpec::Pvc { pvc } = spec.storage.clone() { - Some(pvc) - } else { - None - } - }) + .filter_map(|spec| spec.storage.as_pvc().cloned()) .map(|pvc| VolumeMount { name: pvc.name_any(), // because the volume name is the same as pvc template name, we can use it in volume mount mount_path: DEFAULT_BACKUP_DIR.to_owned(), @@ -173,28 +161,30 @@ impl<'a> Extractor<'a> { } /// Extract owner reference + #[allow(clippy::expect_used)] // it is ok because xlinecluster always populated from the apiserver fn extract_owner_ref(&self) -> OwnerReference { // unwrap controller_owner_ref is always safe - let Some(owner_ref) = self.cluster.controller_owner_ref(&()) else { unreachable!("kube-runtime has undergone some changes.") }; - owner_ref + self.cluster + .controller_owner_ref(&()) + .expect("metadata doesn't have name or uid") } /// Extract name, namespace #[allow(clippy::expect_used)] // it is ok because xlinecluster has field validation fn extract_id(&self) -> 
(&str, &str) { - let namespace = self - .cluster - .metadata - .namespace - .as_deref() - .expect("xlinecluster resource should have a namespace"); let name = self .cluster .metadata .name .as_deref() .expect("xlinecluster resource should have a name"); - (namespace, name) + let namespace = self + .cluster + .metadata + .namespace + .as_deref() + .expect("xlinecluster resource should have a namespace"); + (name, namespace) } /// Extract inherit labels @@ -251,16 +241,20 @@ impl Factory { let mut labels = extractor.extract_inherit_labels(); let (name, namespace) = extractor.extract_id(); let owner_ref = extractor.extract_owner_ref(); - let _ig = labels.insert(LABEL_CLUSTER_NAME.to_owned(), name.to_owned()); - let __ig = labels.insert( + _ = labels.insert(LABEL_CLUSTER_NAME.to_owned(), name.to_owned()); + _ = labels.insert( LABEL_CLUSTER_COMPONENT.to_owned(), component.label().to_owned(), ); + _ = labels.insert( + LABEL_OPERATOR_VERSION.to_owned(), + env!("CARGO_PKG_VERSION").to_owned(), + ); ObjectMeta { - labels: Some(labels), // it is used in selector - name: Some(Self::component_name(name, component)), // all subresources share the same name + labels: Some(labels), // it is used in selector + name: Some(Self::component_name(name, component)), namespace: Some(namespace.to_owned()), // all subresources share the same namespace - owner_references: Some(vec![owner_ref]), // allow k8s GC to automatically clean up itself + owner_references: Some(vec![owner_ref]), // allow k8s GC to automatically clean up itself when `XlineCluster` is deleted ..ObjectMeta::default() } } @@ -303,16 +297,13 @@ impl Factory { } /// Set the entrypoint of the container - fn set_command(&self, container: &mut Container, index: usize) { + fn set_command(&self, container: &mut Container, size: usize) { let extractor = Extractor::new(self.cluster.as_ref()); let (name, namespace) = extractor.extract_id(); let (xline_port, _, _) = extractor.extract_ports(); let srv_name = Self::component_name(name, Component::Service); let mut members = vec![]; - // the node before this index has already been added to the members - // we use the members from 0 to index to build the initial cluster config for this node - // and then do membership change to update the cluster config - for i in 0..=index { + for i in 0..=size { let node_name = format!("{}-{i}", Self::component_name(name, Component::Node)); members.push(format!( "{node_name}={node_name}.{srv_name}.{namespace}.svc.{}:{}", @@ -329,10 +320,10 @@ impl Factory { } /// Get the xline container - fn xline_container(&self, index: usize) -> Container { + fn xline_container(&self, size: usize) -> Container { let mut container = self.cluster.spec.container.clone(); self.mount_volume_on_container(&mut container); - self.set_command(&mut container, index); + self.set_command(&mut container, size); // we need to set the env variable to get the pod name in the container container.env = Some(vec![EnvVar { name: XLINE_POD_NAME_ENV.to_owned(), @@ -349,46 +340,262 @@ impl Factory { } /// Get the node pod - fn node_pod(&self, index: usize) -> PodTemplateSpec { + fn pod_spec(&self, size: usize) -> PodTemplateSpec { let extractor = Extractor::new(self.cluster.as_ref()); let (name, _) = extractor.extract_id(); - let node_name = format!("{}-{index}", Self::component_name(name, Component::Node)); - let xline = self.xline_container(index); - let volumes = extractor - .extract_pvc_template() - .into_iter() - .map(|pvc_template| Volume { - name: pvc_template.name_any(), // the volume name is the same as 
pvc template name - persistent_volume_claim: Some(PersistentVolumeClaimVolumeSource { - claim_name: format!("{}-{}", pvc_template.name_any(), node_name), // the pvc detail name is template name + node name - ..PersistentVolumeClaimVolumeSource::default() - }), - ..Volume::default() - }) - .collect(); - let mut meta = self.general_metadata(Component::Node); - meta.name = Some(node_name); + let xline = self.xline_container(size); PodTemplateSpec { - metadata: Some(meta), + metadata: Some(self.general_metadata(Component::Node)), spec: Some(PodSpec { init_containers: Some(vec![]), containers: vec![xline], affinity: self.cluster.spec.affinity.clone(), - volumes: Some(volumes), ..PodSpec::default() }), } } +} - /// Get the pvc for a node pod - fn pvc(&self, index: usize) -> Vec { - let extractor = Extractor::new(self.cluster.as_ref()); - let mut pvcs = extractor.extract_pvc_template(); - let (name, _) = extractor.extract_id(); - let node_name = format!("{}-{index}", Self::component_name(name, Component::Node)); - for pvc in &mut pvcs { - pvc.metadata.name = Some(format!("{}-{}", pvc.name_any(), node_name)); +#[cfg(test)] +mod tests { + use super::*; + use k8s_openapi::api::core::v1::{Affinity, NodeAffinity, PersistentVolumeClaimSpec}; + use kube::CustomResourceExt; + + static CLUSTER_1: &str = r#" +apiVersion: xlineoperator.xline.cloud/v1alpha +kind: XlineCluster +metadata: + name: my-xline-cluster + labels: + app: my-xline-cluster + appNamespace: default + annotations: + xlineoperator.datenlord.io/inherit-label-prefix: "app" +spec: + size: 3 + container: + image: "datenlord/xline" + name: "my-xline" + ports: + - containerPort: 2379 + name: xline + "#; + + static CLUSTER_2: &str = r#" +apiVersion: xlineoperator.xline.cloud/v1alpha +kind: XlineCluster +metadata: + name: my-xline-cluster +spec: + size: 5 + container: + image: "datenlord/xline" + name: "my-xline" + ports: + - containerPort: 3000 + name: xline + - containerPort: 3001 + name: sidecar + data: + metadata: + name: my-xline-cluster-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi + "#; + + static CLUSTER_3: &str = r#" +apiVersion: xlineoperator.datenlord.io/v1alpha +kind: XlineCluster +metadata: + name: my-xline-cluster +spec: + size: 3 + backup: + cron: "*/15 * * * *" + pvc: + metadata: + name: backup-pvc + spec: + storageClassName: xline-backup + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + container: + image: "datenlord/xline" + name: "my-xline" + ports: + - containerPort: 2379 + name: xline + "#; + + static CLUSTER_4: &str = r#" +apiVersion: xlineoperator.datenlord.io/v1alpha +kind: XlineCluster +metadata: + name: my-xline-cluster +spec: + size: 3 + container: + image: "datenlord/xline" + name: "my-xline" + ports: + - containerPort: 2379 + name: xline + pvcs: + - metadata: + name: xline-pvc + spec: + storageClassName: xline-backup + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + "#; + + fn after_apiserver(cluster: &mut Cluster) { + cluster.metadata.namespace = Some("default".to_owned()); // use default namespace if no namespace specified in the yaml + cluster.metadata.uid = Some("this-is-a-random-uid".to_owned()); + } + + #[test] + fn extract_ports_should_work() { + for (cluster_raw, xline, sidecar) in [ + (CLUSTER_1, 2379, 2380), + (CLUSTER_2, 3000, 3001), + (CLUSTER_3, 2379, 2380), + (CLUSTER_4, 2379, 2380), + ] { + let mut cluster: Cluster = serde_yaml::from_str(cluster_raw).unwrap(); + 
after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let (xline_port, sidecar_port, service_ports) = extractor.extract_ports(); + assert_eq!(xline_port.container_port, xline); + assert_eq!(sidecar_port.container_port, sidecar); + assert_eq!(service_ports.len(), 2); + assert_eq!(service_ports[0].name.as_deref(), Some(XLINE_PORT_NAME)); + assert_eq!(service_ports[0].port, xline); + assert_eq!(service_ports[1].name.as_deref(), Some(SIDECAR_PORT_NAME)); + assert_eq!(service_ports[1].port, sidecar); + } + } + + #[test] + fn extract_id_should_work() { + for cluster_raw in [CLUSTER_1, CLUSTER_2, CLUSTER_3, CLUSTER_4] { + let mut cluster: Cluster = serde_yaml::from_str(cluster_raw).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let (name, namespace) = extractor.extract_id(); + assert_eq!(name, "my-xline-cluster"); + assert_eq!(namespace, "default"); + } + } + + #[test] + fn extract_pvc_should_work() { + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_1).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let pvcs = extractor.extract_pvc_template(); + assert_eq!(pvcs.len(), 0); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_2).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let pvcs = extractor.extract_pvc_template(); + assert_eq!(pvcs.len(), 1); + assert_eq!( + pvcs[0].metadata.name.as_deref(), + Some("my-xline-cluster-data") + ); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_3).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let pvcs = extractor.extract_pvc_template(); + assert_eq!(pvcs.len(), 1); + assert_eq!(pvcs[0].metadata.name.as_deref(), Some("backup-pvc")); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_4).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let pvcs = extractor.extract_pvc_template(); + assert_eq!(pvcs.len(), 1); + assert_eq!(pvcs[0].metadata.name.as_deref(), Some("xline-pvc")); + } + + #[test] + fn extract_volume_mount_should_work() { + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_1).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let volume_mount = extractor.extract_additional_volume_mount(); + assert_eq!(volume_mount.len(), 0); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_2).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let volume_mount = extractor.extract_additional_volume_mount(); + assert_eq!(volume_mount.len(), 1); + assert_eq!(volume_mount[0].name, "my-xline-cluster-data"); + assert_eq!(volume_mount[0].mount_path, DEFAULT_DATA_DIR); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_3).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let volume_mount = extractor.extract_additional_volume_mount(); + assert_eq!(volume_mount.len(), 1); + assert_eq!(volume_mount[0].name, "backup-pvc"); + assert_eq!(volume_mount[0].mount_path, DEFAULT_BACKUP_DIR); + + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_4).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let volume_mount = extractor.extract_additional_volume_mount(); + assert_eq!(volume_mount.len(), 0); + } + + #[test] + fn extract_owner_ref_should_work() { + for cluster_raw in [CLUSTER_1, CLUSTER_2, CLUSTER_3, CLUSTER_4] { + let mut cluster: Cluster = 
serde_yaml::from_str(cluster_raw).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let owner_ref = extractor.extract_owner_ref(); + assert_eq!(owner_ref.name, "my-xline-cluster"); } - pvcs + } + + #[test] + fn extract_inherit_labels_should_work() { + let mut cluster: Cluster = serde_yaml::from_str(CLUSTER_1).unwrap(); + after_apiserver(&mut cluster); + let extractor = Extractor::new(&cluster); + let labels = extractor.extract_inherit_labels(); + assert_eq!(labels.len(), 2); + assert_eq!(&labels["app"], "my-xline-cluster"); + assert_eq!(&labels["appNamespace"], "default"); + } + + #[test] + fn factory_component_name_should_work() { + assert_eq!( + Factory::component_name("my-xline-cluster", Component::Node), + "my-xline-cluster-node" + ); + assert_eq!( + Factory::component_name("my-xline-cluster", Component::Service), + "my-xline-cluster-srv" + ); + assert_eq!( + Factory::component_name("my-xline-cluster", Component::BackupJob), + "my-xline-cluster-job" + ); } } From 55e08afb0d17a3735bde9bc5180cf341925117e0 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Thu, 7 Sep 2023 15:08:11 +0800 Subject: [PATCH 07/11] refactor: remove useless Context Signed-off-by: iGxnon --- operator-k8s/src/controller/mod.rs | 23 ++++------------------- operator-k8s/src/operator.rs | 8 ++++---- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/operator-k8s/src/controller/mod.rs b/operator-k8s/src/controller/mod.rs index 8d8b4d03..9d96afab 100644 --- a/operator-k8s/src/controller/mod.rs +++ b/operator-k8s/src/controller/mod.rs @@ -15,19 +15,6 @@ use crate::consts::DEFAULT_REQUEUE_DURATION; /// Cluster controller pub(crate) mod cluster; -/// The common context -pub(crate) struct Context { - /// The controller held by this context - controller: C, -} - -impl Context { - /// Constructor - pub(crate) fn new(controller: C) -> Self { - Self { controller } - } -} - /// Metrics labeled pub(crate) trait MetricsLabeled { /// Label @@ -75,26 +62,24 @@ where fn handle_error(&self, resource: &Arc, err: &Self::Error); /// The reconcile function used in kube::runtime::Controller - async fn reconcile(resource: Arc, ctx: Arc>) -> Result { - let controller = &ctx.controller; + async fn reconcile(resource: Arc, controller: Arc) -> Result { let _timer = controller.metrics().record_duration(); controller.reconcile_once(&resource).await?; Ok(Action::requeue(DEFAULT_REQUEUE_DURATION)) } /// The on_error function used in kube::runtime::Controller - fn on_error(resource: Arc, err: &Self::Error, ctx: Arc>) -> Action { - let controller = &ctx.controller; + fn on_error(resource: Arc, err: &Self::Error, controller: Arc) -> Action { controller.metrics().record_failed_count(&err.labels()); controller.handle_error(&resource, err); Action::requeue(DEFAULT_REQUEUE_DURATION) } /// Run this controller - async fn run(ctx: Arc>, api: Api) { + async fn run(controller: Arc, api: Api) { kube::runtime::Controller::new(api, WatcherConfig::default()) .shutdown_on_signal() - .run(Self::reconcile, Self::on_error, ctx) + .run(Self::reconcile, Self::on_error, controller) .filter_map(|res| async move { res.ok() }) .for_each(|_| futures::future::ready(())) .await; diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index 2b09456d..23ebb471 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -21,7 +21,7 @@ use utils::migration::ApiVersion; use crate::config::{Config, Namespace}; use crate::controller::cluster::{ClusterMetrics, Controller as ClusterController}; -use 
crate::controller::{Context, Controller, Metrics}; +use crate::controller::{Controller, Metrics}; use crate::crd::Cluster; use crate::router::{healthz, metrics, sidecar_state}; use crate::sidecar_state::SidecarState; @@ -84,12 +84,12 @@ impl Operator { let metrics = ClusterMetrics::new(); let registry = Registry::new(); metrics.register(®istry)?; - let ctx = Arc::new(Context::new(ClusterController { + let controller = Arc::new(ClusterController { kube_client, cluster_suffix: self.config.cluster_suffix.clone(), metrics, - })); - let mut controller = ClusterController::run(ctx, cluster_api); + }); + let mut controller = ClusterController::run(controller, cluster_api); let web_server = self.web_server(status_tx, registry); From 4d07f7453b75ba2eb724d2c89d97ce53840d239a Mon Sep 17 00:00:00 2001 From: iGxnon Date: Sun, 10 Sep 2023 13:08:43 +0800 Subject: [PATCH 08/11] chore: module stuff Signed-off-by: iGxnon --- .../src/controller/cluster/metrics.rs | 73 +++++++++++++++++ operator-k8s/src/controller/cluster/mod.rs | 79 +------------------ operator-k8s/src/crd/mod.rs | 2 +- operator-k8s/src/operator.rs | 2 +- 4 files changed, 79 insertions(+), 77 deletions(-) create mode 100644 operator-k8s/src/controller/cluster/metrics.rs diff --git a/operator-k8s/src/controller/cluster/metrics.rs b/operator-k8s/src/controller/cluster/metrics.rs new file mode 100644 index 00000000..29627d0b --- /dev/null +++ b/operator-k8s/src/controller/cluster/metrics.rs @@ -0,0 +1,73 @@ +use clippy_utilities::NumericCast; +use prometheus::{Error, Histogram, HistogramOpts, HistogramTimer, IntCounterVec, Opts, Registry}; + +use std::iter::repeat; +use std::ops::Mul; + +use crate::controller::Metrics; + +/// Cluster metrics +pub(crate) struct ClusterMetrics { + /// Reconcile duration histogram + reconcile_duration: Histogram, + /// Reconcile failed count + reconcile_failed_count: IntCounterVec, +} + +impl Default for ClusterMetrics { + fn default() -> Self { + Self::new() + } +} + +impl Metrics for ClusterMetrics { + /// Register metrics + fn register(&self, registry: &Registry) -> Result<(), Error> { + registry.register(Box::new(self.reconcile_duration.clone()))?; + registry.register(Box::new(self.reconcile_failed_count.clone())) + } + + /// Record duration + fn record_duration(&self) -> HistogramTimer { + self.reconcile_duration.start_timer() + } + + /// Increment failed count + fn record_failed_count(&self, labels: &[&str]) { + self.reconcile_failed_count.with_label_values(labels).inc(); + } +} + +impl ClusterMetrics { + /// Create a new cluster metrics + #[allow(clippy::expect_used)] + pub(crate) fn new() -> Self { + Self { + reconcile_duration: Histogram::with_opts( + HistogramOpts::new( + "operator_reconcile_duration_seconds", + "Duration of operator reconcile loop in seconds", + ) + .buckets(exponential_time_bucket(0.1, 2.0, 10)), + ) + .expect("failed to create operator_reconcile_duration_seconds histogram"), + reconcile_failed_count: IntCounterVec::new( + Opts::new( + "operator_reconcile_failed_count", + "Number of failed times the operator reconcile loop has run", + ), + &["reason"], + ) + .expect("failed to create operator_reconcile_failed_count counter"), + } + } +} + +/// Returns a vector of time buckets for the reconcile duration histogram. 
+fn exponential_time_bucket(start: f64, factor: f64, count: usize) -> Vec { + repeat(factor) + .enumerate() + .take(count) + .map(|(i, f)| start.mul(f.powi(i.numeric_cast()))) + .collect::>() +} diff --git a/operator-k8s/src/controller/cluster/mod.rs b/operator-k8s/src/controller/cluster/mod.rs index ef8e33c8..f7fe474d 100644 --- a/operator-k8s/src/controller/cluster/mod.rs +++ b/operator-k8s/src/controller/cluster/mod.rs @@ -1,81 +1,10 @@ -use clippy_utilities::NumericCast; -use prometheus::{Error, Histogram, HistogramOpts, HistogramTimer, IntCounterVec, Opts, Registry}; - -use std::iter::repeat; -use std::ops::Mul; - -use crate::controller::Metrics; - /// Controller v1alpha mod v1alpha; /// Controller v1alpha1 mod v1alpha1; -/// Current controller of cluster -pub(crate) type Controller = v1alpha::ClusterController; - -/// Cluster metrics -pub(crate) struct ClusterMetrics { - /// Reconcile duration histogram - reconcile_duration: Histogram, - /// Reconcile failed count - reconcile_failed_count: IntCounterVec, -} - -impl Default for ClusterMetrics { - fn default() -> Self { - Self::new() - } -} - -impl Metrics for ClusterMetrics { - /// Register metrics - fn register(&self, registry: &Registry) -> Result<(), Error> { - registry.register(Box::new(self.reconcile_duration.clone()))?; - registry.register(Box::new(self.reconcile_failed_count.clone())) - } - - /// Record duration - fn record_duration(&self) -> HistogramTimer { - self.reconcile_duration.start_timer() - } - - /// Increment failed count - fn record_failed_count(&self, labels: &[&str]) { - self.reconcile_failed_count.with_label_values(labels).inc(); - } -} - -impl ClusterMetrics { - /// Create a new cluster metrics - #[allow(clippy::expect_used)] - pub(crate) fn new() -> Self { - Self { - reconcile_duration: Histogram::with_opts( - HistogramOpts::new( - "operator_reconcile_duration_seconds", - "Duration of operator reconcile loop in seconds", - ) - .buckets(exponential_time_bucket(0.1, 2.0, 10)), - ) - .expect("failed to create operator_reconcile_duration_seconds histogram"), - reconcile_failed_count: IntCounterVec::new( - Opts::new( - "operator_reconcile_failed_count", - "Number of failed times the operator reconcile loop has run", - ), - &["reason"], - ) - .expect("failed to create operator_reconcile_failed_count counter"), - } - } -} +/// Controller metrics +mod metrics; -/// Returns a vector of time buckets for the reconcile duration histogram. 
-fn exponential_time_bucket(start: f64, factor: f64, count: usize) -> Vec { - repeat(factor) - .enumerate() - .take(count) - .map(|(i, f)| start.mul(f.powi(i.numeric_cast()))) - .collect::>() -} +pub(crate) use metrics::ClusterMetrics; +pub(crate) use v1alpha::ClusterController; diff --git a/operator-k8s/src/crd/mod.rs b/operator-k8s/src/crd/mod.rs index 5f4c0aae..aaacab3a 100644 --- a/operator-k8s/src/crd/mod.rs +++ b/operator-k8s/src/crd/mod.rs @@ -12,4 +12,4 @@ pub(crate) mod v1alpha; pub(crate) mod v1alpha1; /// Current CRD `XineCluster` -pub(crate) type Cluster = v1alpha::Cluster; +pub(crate) use v1alpha::Cluster; diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index 23ebb471..ac5c0c79 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -20,7 +20,7 @@ use tracing::{debug, info, warn}; use utils::migration::ApiVersion; use crate::config::{Config, Namespace}; -use crate::controller::cluster::{ClusterMetrics, Controller as ClusterController}; +use crate::controller::cluster::{ClusterController, ClusterMetrics}; use crate::controller::{Controller, Metrics}; use crate::crd::Cluster; use crate::router::{healthz, metrics, sidecar_state}; From a6a5fc72d8b5180ff31f50cdbd870c3576801f89 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Sun, 10 Sep 2023 16:01:21 +0800 Subject: [PATCH 09/11] fix: fix CRD version conflict Signed-off-by: iGxnon --- operator-k8s/src/config.rs | 3 + operator-k8s/src/controller/cluster/mod.rs | 2 +- operator-k8s/src/crd/mod.rs | 2 +- operator-k8s/src/operator.rs | 190 +++++++++++++++------ utils/src/migration.rs | 8 + 5 files changed, 153 insertions(+), 52 deletions(-) diff --git a/operator-k8s/src/config.rs b/operator-k8s/src/config.rs index cc6f4638..52690617 100644 --- a/operator-k8s/src/config.rs +++ b/operator-k8s/src/config.rs @@ -14,6 +14,9 @@ pub struct Config { /// Whether to create CRD regardless of current version on k8s #[arg(long, default_value = "false")] pub create_crd: bool, + /// Whether to enable auto migration if CRD version is less than current version + #[arg(long, default_value = "false")] + pub auto_migration: bool, /// The kubernetes cluster DNS suffix #[arg(long, default_value = "cluster.local")] pub cluster_suffix: String, diff --git a/operator-k8s/src/controller/cluster/mod.rs b/operator-k8s/src/controller/cluster/mod.rs index f7fe474d..765e4854 100644 --- a/operator-k8s/src/controller/cluster/mod.rs +++ b/operator-k8s/src/controller/cluster/mod.rs @@ -7,4 +7,4 @@ mod v1alpha1; mod metrics; pub(crate) use metrics::ClusterMetrics; -pub(crate) use v1alpha::ClusterController; +pub(crate) use v1alpha1::ClusterController; diff --git a/operator-k8s/src/crd/mod.rs b/operator-k8s/src/crd/mod.rs index aaacab3a..87910eb4 100644 --- a/operator-k8s/src/crd/mod.rs +++ b/operator-k8s/src/crd/mod.rs @@ -12,4 +12,4 @@ pub(crate) mod v1alpha; pub(crate) mod v1alpha1; /// Current CRD `XineCluster` -pub(crate) use v1alpha::Cluster; +pub(crate) use v1alpha1::Cluster; diff --git a/operator-k8s/src/operator.rs b/operator-k8s/src/operator.rs index ac5c0c79..5dab140c 100644 --- a/operator-k8s/src/operator.rs +++ b/operator-k8s/src/operator.rs @@ -1,4 +1,3 @@ -use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; @@ -10,7 +9,8 @@ use flume::Sender; use futures::FutureExt; use k8s_openapi::api::core::v1::Pod; use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition; -use kube::api::{ListParams, Patch, PatchParams, PostParams}; +use kube::api::{DynamicObject, 
ListParams, Patch, PatchParams};
+use kube::core::crd::merge_crds;
 use kube::runtime::wait::{await_condition, conditions};
 use kube::{Api, Client, CustomResourceExt, Resource};
 use operator_api::HeartbeatStatus;
@@ -20,6 +20,7 @@ use tracing::{debug, info, warn};
 use utils::migration::ApiVersion;
 
 use crate::config::{Config, Namespace};
+use crate::consts::FIELD_MANAGER;
 use crate::controller::cluster::{ClusterController, ClusterMetrics};
 use crate::controller::{Controller, Metrics};
 use crate::crd::Cluster;
 use crate::router::{healthz, metrics, sidecar_state};
 use crate::sidecar_state::SidecarState;
@@ -132,64 +133,153 @@ impl Operator {
         Ok(())
     }
 
+    /// Wait for CRD to be established
+    async fn wait_crd_established(
+        crd_api: Api<CustomResourceDefinition>,
+        crd_name: &str,
+    ) -> Result<()> {
+        let establish = await_condition(crd_api, crd_name, conditions::is_crd_established());
+        debug!("wait for crd established");
+        _ = tokio::time::timeout(CRD_ESTABLISH_TIMEOUT, establish).await??;
+        Ok(())
+    }
+
     /// Prepare CRD
     /// This method attempts to initialize the CRD if it does not already exist.
     /// Additionally, it could migrate CRD with the version of `CURRENT_VERSION`.
     async fn prepare_crd(&self, kube_client: &Client) -> Result<()> {
         let crd_api: Api = Api::all(kube_client.clone());
-        let crds: HashMap<_, _> = crd_api
-            .list(&ListParams::default())
-            .await?
-            .items
-            .into_iter()
-            .filter_map(|crd| crd.metadata.name.map(|name| (name, crd.spec.versions)))
-            .collect();
         let definition = Cluster::crd();
-        match crds.get(Cluster::crd_name()) {
-            None => {
-                // cannot find crd name, initial CRD
-                debug!("cannot found XlineCluster CRD, try to init it");
-                let _crd = crd_api.create(&PostParams::default(), &definition).await?;
+        let current_version: ApiVersion = Cluster::version(&()).as_ref().parse()?;
+
+        let ret = crd_api.get(Cluster::crd_name()).await;
+        if let Err(kube::Error::Api(kube::error::ErrorResponse { code: 404, .. })) = ret {
+            if !self.config.create_crd {
+                return Err(anyhow::anyhow!(
+                    "cannot find XlineCluster CRD, please set --create-crd to true or apply the CRD manually"
+                ));
             }
-            Some(versions) => {
-                let current_version = Cluster::version(&());
-                debug!("found XlineCluster CRD, current version {current_version}");
-                let current_version: ApiVersion = current_version.as_ref().parse()?;
-                let versions: Vec> = versions
-                    .iter()
-                    .map(|v| v.name.parse())
-                    .collect::>()?;
-                if versions.iter().all(|ver| &current_version > ver) {
-                    debug!("{current_version} is larger than all version on k8s, patch to latest");
-                    let _crd = crd_api
-                        .patch(
-                            Cluster::crd_name(),
-                            &PatchParams::default(),
-                            &Patch::Merge(definition),
-                        )
-                        .await?;
-                    return Ok(());
+            // the following code needs `customresourcedefinitions` write permission
+            debug!("cannot find XlineCluster CRD, try to init it");
+            _ = crd_api
+                .patch(
+                    Cluster::crd_name(),
+                    &PatchParams::apply(FIELD_MANAGER),
+                    &Patch::Apply(definition.clone()),
+                )
+                .await?;
+            Self::wait_crd_established(crd_api.clone(), Cluster::crd_name()).await?;
+            return Ok(());
+        }
+
+        debug!("found XlineCluster CRD, current version: {current_version}");
+
+        let mut add = true;
+        let mut storage = String::new();
+
+        let mut crds = ret?
+            .spec
+            .versions
+            .iter()
+            .cloned()
+            .map(|ver| {
+                let mut crd = definition.clone();
+                if ver.name == current_version.to_string() {
+                    add = false;
                 }
-                assert!(self.config.create_crd || !versions.iter().any(|ver| ver > &current_version), "The current XlineCluster CRD version {current_version} is not compatible with higher version on k8s. Please use the latest xline-operator or set --create_crd to true.");
-                if self.config.create_crd {
-                    debug!("create_crd set to true, force patch this CRD");
-                    let _crd = crd_api
-                        .patch(
-                            Cluster::crd_name(),
-                            &PatchParams::default(),
-                            &Patch::Merge(definition),
-                        )
-                        .await?;
+                if ver.storage {
+                    storage = ver.name.clone();
                 }
-            }
+                crd.spec.versions = vec![ver];
+                crd
+            })
+            .collect::<Vec<_>>();
+
+        if add {
+            crds.push(definition.clone());
+        } else {
+            debug!("current version already exists, try to migrate");
+            self.try_migration(kube_client, crds, &current_version, &storage)
+                .await?;
+            return Ok(());
        }
-        let establish = await_condition(
-            crd_api,
-            Cluster::crd_name(),
-            conditions::is_crd_established(),
-        );
-        let _crd = tokio::time::timeout(CRD_ESTABLISH_TIMEOUT, establish).await??;
-        debug!("crd established");
+
+        if !self.config.create_crd {
+            return Err(anyhow::anyhow!(
+                "cannot find XlineCluster CRD with version {current_version}, please set --create-crd to true or apply the CRD manually"
+            ));
+        }
+
+        let merged_crd = merge_crds(crds.clone(), &storage)?;
+        debug!("try to update crd");
+        _ = crd_api
+            .patch(
+                Cluster::crd_name(),
+                &PatchParams::apply(FIELD_MANAGER),
+                &Patch::Apply(merged_crd),
+            )
+            .await?;
+        Self::wait_crd_established(crd_api.clone(), Cluster::crd_name()).await?;
+
+        debug!("crd updated, try to migrate");
+        self.try_migration(kube_client, crds, &current_version, &storage)
+            .await?;
+
+        Ok(())
+    }
+
+    /// Try to migrate CRD
+    #[allow(clippy::indexing_slicing)] // there is at least one element in `versions`
+    #[allow(clippy::expect_used)]
+    async fn try_migration(
+        &self,
+        kube_client: &Client,
+        crds: Vec<CustomResourceDefinition>,
+        current_version: &ApiVersion,
+        storage: &str,
+    ) -> Result<()> {
+        if !self.config.auto_migration {
+            debug!("auto migration is disabled, skip migration");
+            return Ok(());
+        }
+        if current_version.to_string() == storage {
+            // stop migration if current version is already in storage
+            debug!("current version is already in storage, skip migration");
+            return Ok(());
+        }
+        let versions: Vec> = crds
+            .iter()
+            .map(|crd| crd.spec.versions[0].name.parse())
+            .collect::>()?;
+        if versions.iter().any(|ver| current_version < ver) {
+            // stop migration if current version is less than any version in `versions`
+            debug!("current version is less than some version in crd, skip migration");
+            return Ok(());
+        }
+        let group = kube::discovery::group(kube_client, Cluster::group(&()).as_ref()).await?;
+        let Some((ar, _)) = group
+            .versioned_resources(storage)
+            .into_iter()
+            .find(|res| res.0.kind == Cluster::kind(&())) else { return Ok(()) };
+        let api: Api<DynamicObject> = Api::all_with(kube_client.clone(), &ar);
+        let clusters = api.list(&ListParams::default()).await?.items;
+        if !clusters.is_empty() && !current_version.compat_with(&storage.parse()?) 
{
+            // there are some clusters on the storage version that are not compatible with the current version, stop the migration
+            // TODO add a flag to these clusters to indicate that they need to be migrated
+            return Ok(());
+        }
+        // start the migration as there is no cluster on the storage version
+        let merged_crd = merge_crds(crds, &current_version.to_string())?;
+        let crd_api: Api<CustomResourceDefinition> = Api::all(kube_client.clone());
+        debug!("try to migrate crd from {storage} to {current_version}");
+        _ = crd_api
+            .patch(
+                Cluster::crd_name(),
+                &PatchParams::apply(FIELD_MANAGER),
+                &Patch::Apply(merged_crd),
+            )
+            .await?;
+        Self::wait_crd_established(crd_api.clone(), Cluster::crd_name()).await?;
         Ok(())
     }
 
diff --git a/utils/src/migration.rs b/utils/src/migration.rs
index 1588c375..700973bf 100644
--- a/utils/src/migration.rs
+++ b/utils/src/migration.rs
@@ -39,6 +39,14 @@ impl ApiVersion {
         Self::Stable(main, PhantomData)
     }
 
+    /// Check whether this version is compatible with another version
+    /// We promise to keep compatibility within the same main version
+    #[must_use]
+    #[inline]
+    pub fn compat_with(&self, other: &Self) -> bool {
+        self.main_version() == other.main_version()
+    }
+
     /// return the main version
     fn main_version(&self) -> u32 {
         match *self {

From d8c748bae3c591f52a279bfbcf4cd0e066b0f779 Mon Sep 17 00:00:00 2001
From: iGxnon
Date: Mon, 11 Sep 2023 21:34:45 +0800
Subject: [PATCH 10/11] refactor: refactor controller with factory

Signed-off-by: iGxnon
---
 .github/workflows/e2e.yml                  |   2 +-
 build/xline-operator.Dockerfile            |   5 -
 operator-k8s/src/consts.rs                 |   4 +-
 operator-k8s/src/controller/cluster/mod.rs |   2 -
 .../src/controller/cluster/v1alpha.rs      | 412 ---------------
 .../src/controller/cluster/v1alpha1.rs     | 490 ++----------------
 operator-k8s/src/crd/mod.rs                |   7 -
 operator-k8s/src/crd/v1alpha/cluster.rs    |  91 ----
 operator-k8s/src/crd/v1alpha/mod.rs        |   3 -
 operator-k8s/src/crd/v1alpha1/mod.rs       |   4 +-
 operator-k8s/src/manager/cluster.rs        | 180 +++--
 operator-k8s/src/manager/mod.rs            |   2 +-
 tests/e2e/cases/ci.sh                      |  21 +-
 tests/e2e/cases/manifests/cluster.yml      |   4 +-
 tests/e2e/cases/manifests/operators.yml    |   7 +-
 tests/e2e/testenv/testenv.sh               |   4 +-
 16 files changed, 207 insertions(+), 1031 deletions(-)
 delete mode 100644 build/xline-operator.Dockerfile
 delete mode 100644 operator-k8s/src/controller/cluster/v1alpha.rs
 delete mode 100644 operator-k8s/src/crd/v1alpha/cluster.rs
 delete mode 100644 operator-k8s/src/crd/v1alpha/mod.rs

diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 26d9c6cb..26619ed1 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -40,7 +40,7 @@ jobs:
         cargo build --release
         cd build
         cp ../target/release/xline-operator .
-        docker build . -t datenlord/xline-operator:latest -f operator.Dockerfile
+        docker build . 
-t xline-kv/xline-operator:latest -f operator.Dockerfile - name: 'E2E CI' env: KIND_CLUSTER_IMAGE: kindest/node:${{ matrix.k8s }} diff --git a/build/xline-operator.Dockerfile b/build/xline-operator.Dockerfile deleted file mode 100644 index e100dcdb..00000000 --- a/build/xline-operator.Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM ubuntu:latest - -COPY xline-operator /usr/local/bin - -CMD ["/usr/local/bin/xline-operator"] diff --git a/operator-k8s/src/consts.rs b/operator-k8s/src/consts.rs index 1403144f..607e86e8 100644 --- a/operator-k8s/src/consts.rs +++ b/operator-k8s/src/consts.rs @@ -1,11 +1,11 @@ +#![allow(unused)] // TODO remove + use std::time::Duration; /// The default requeue duration to achieve eventual consistency pub(crate) const DEFAULT_REQUEUE_DURATION: Duration = Duration::from_secs(600); /// The field manager identifier of xline operator pub(crate) const FIELD_MANAGER: &str = "xlineoperator.datenlord.io"; -/// The emptyDir volume name of each pod if there is no data pvc specified -pub(crate) const DATA_EMPTY_DIR_NAME: &str = "xline-data-empty-dir"; /// The image used for cronjob to trigger backup /// The following command line tool should be available in this image /// 1. curl diff --git a/operator-k8s/src/controller/cluster/mod.rs b/operator-k8s/src/controller/cluster/mod.rs index 765e4854..2a4b8058 100644 --- a/operator-k8s/src/controller/cluster/mod.rs +++ b/operator-k8s/src/controller/cluster/mod.rs @@ -1,5 +1,3 @@ -/// Controller v1alpha -mod v1alpha; /// Controller v1alpha1 mod v1alpha1; diff --git a/operator-k8s/src/controller/cluster/v1alpha.rs b/operator-k8s/src/controller/cluster/v1alpha.rs deleted file mode 100644 index 49e76300..00000000 --- a/operator-k8s/src/controller/cluster/v1alpha.rs +++ /dev/null @@ -1,412 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use async_trait::async_trait; -use k8s_openapi::api::apps::v1::{ - RollingUpdateStatefulSetStrategy, StatefulSet, StatefulSetSpec, StatefulSetUpdateStrategy, -}; -use k8s_openapi::api::core::v1::{ - Container, ContainerPort, EmptyDirVolumeSource, EnvVar, EnvVarSource, ObjectFieldSelector, - PersistentVolumeClaim, PodSpec, PodTemplateSpec, Service, ServicePort, ServiceSpec, Volume, - VolumeMount, -}; -use k8s_openapi::apimachinery::pkg::apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference}; -use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; -use kube::api::{Patch, PatchParams}; -use kube::{Api, Client, Resource, ResourceExt}; -use tracing::{debug, error}; -use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; - -use crate::consts::{ - DATA_EMPTY_DIR_NAME, DEFAULT_XLINE_PORT, FIELD_MANAGER, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, -}; -use crate::controller::cluster::ClusterMetrics; -use crate::controller::{Controller, MetricsLabeled}; -use crate::crd::v1alpha::Cluster; - -/// CRD `XlineCluster` controller -pub(crate) struct ClusterController { - /// Kubernetes client - pub(crate) kube_client: Client, - /// The kubernetes cluster dns suffix - pub(crate) cluster_suffix: String, - /// Cluster metrics - pub(crate) metrics: ClusterMetrics, -} - -/// All possible errors -#[derive(thiserror::Error, Debug)] -pub(crate) enum Error { - /// Missing an object in cluster - #[error("Missing object key {0} in cluster")] - MissingObject(&'static str), - /// Kube error - #[error("Kubernetes api error")] - Kube(#[from] kube::Error), - /// Backup PV mount path is already mounted - #[error("The path {0} is internally used in the xline operator and cannot be mounted.")] - 
CannotMount(&'static str), - /// Volume(PVC) name conflict with `DATA_EMPTY_DIR_NAME` - #[error("The {0} is conflict with the name internally used in the xline operator")] - InvalidVolumeName(&'static str), -} - -impl MetricsLabeled for Error { - fn labels(&self) -> Vec<&str> { - match *self { - Self::MissingObject(_) => vec!["missing_object"], - Self::Kube(_) => vec!["kube"], - Self::CannotMount(_) => vec!["cannot_mount"], - Self::InvalidVolumeName(_) => vec!["invalid_volume_name"], - } - } -} - -/// Controller result -type Result = std::result::Result; - -impl ClusterController { - /// Extract ports - fn extract_ports(cluster: &Arc) -> (ContainerPort, Vec) { - // expose all the container's ports - let mut xline_port = None; - let container_ports = cluster.spec.container.ports.clone().unwrap_or_default(); - let mut service_ports: Vec<_> = container_ports - .into_iter() - .map(|port| { - // the port with name `xline` is considered to be the port of xline - if matches!(port.name.as_deref(), Some(XLINE_PORT_NAME)) { - xline_port = Some(port.clone()); - } - ServicePort { - name: port.name.clone(), - port: port.container_port, - ..ServicePort::default() - } - }) - .collect(); - if xline_port.is_none() { - // add default xline port 2379 to service port if xline port is not specified - service_ports.push(ServicePort { - name: Some(XLINE_PORT_NAME.to_owned()), - port: DEFAULT_XLINE_PORT, - ..ServicePort::default() - }); - } - // if it is not specified, 2379 is used as xline port - let xline_port = xline_port.unwrap_or(ContainerPort { - name: Some(XLINE_PORT_NAME.to_owned()), - container_port: DEFAULT_XLINE_PORT, - ..ContainerPort::default() - }); - (xline_port, service_ports) - } - - /// Extract persistent volume claims - fn extract_pvcs(cluster: &Arc) -> Result> { - let mut pvcs = Vec::new(); - // check if the data pvc if specified, add the pvc to pvcs - if let Some(pvc) = cluster.spec.data.as_ref() { - pvcs.push(pvc.clone()); - } - // extend the user defined pvcs - if let Some(spec_pvcs) = cluster.spec.pvcs.clone() { - if spec_pvcs - .iter() - .any(|pvc| pvc.name_any() == DATA_EMPTY_DIR_NAME) - { - return Err(Error::InvalidVolumeName(".spec.pvcs[].metadata.name")); - } - pvcs.extend(spec_pvcs); - } - Ok(pvcs) - } - - /// Extract owner reference - fn extract_owner_ref(cluster: &Arc) -> OwnerReference { - // unwrap controller_owner_ref is always safe - let Some(owner_ref) = cluster.controller_owner_ref(&()) else { unreachable!("kube-runtime has undergone some changes.") }; - owner_ref - } - - /// Extract name, namespace - fn extract_id(cluster: &Arc) -> Result<(&str, &str)> { - let namespace = cluster - .metadata - .namespace - .as_deref() - .ok_or(Error::MissingObject(".metadata.namespace"))?; - let name = cluster - .metadata - .name - .as_deref() - .ok_or(Error::MissingObject(".metadata.name"))?; - Ok((namespace, name)) - } - - /// Build the metadata which shares between all subresources - fn build_metadata(namespace: &str, name: &str, owner_ref: OwnerReference) -> ObjectMeta { - let mut labels: BTreeMap = BTreeMap::new(); - let _: Option<_> = labels.insert("app".to_owned(), name.to_owned()); - ObjectMeta { - labels: Some(labels.clone()), // it is used in selector - name: Some(name.to_owned()), // all subresources share the same name - namespace: Some(namespace.to_owned()), // all subresources share the same namespace - owner_references: Some(vec![owner_ref]), // allow k8s GC to automatically clean up itself - ..ObjectMeta::default() - } - } - - /// Apply headless service - async fn 
apply_headless_service( - &self, - namespace: &str, - name: &str, - metadata: &ObjectMeta, - service_ports: Vec, - ) -> Result<()> { - let api: Api = Api::namespaced(self.kube_client.clone(), namespace); - let _: Service = api - .patch( - name, - &PatchParams::apply(FIELD_MANAGER), - &Patch::Apply(Service { - metadata: metadata.clone(), - spec: Some(ServiceSpec { - cluster_ip: None, - ports: Some(service_ports), - selector: metadata.labels.clone(), - ..ServiceSpec::default() - }), - ..Service::default() - }), - ) - .await?; - Ok(()) - } - - /// Prepare container volume - fn prepare_container_volume( - cluster: &Arc, - mut container: Container, - ) -> Result<(Container, Option>)> { - let data = cluster.spec.data.clone(); - let mut volumes = None; - // mount data volume to `DEFAULT_DATA_DIR` in container - let data_mount = if let Some(pvc) = data { - let name = pvc - .metadata - .name - .ok_or(Error::MissingObject(".spec.data.metadata.name"))?; - if name == DATA_EMPTY_DIR_NAME { - return Err(Error::InvalidVolumeName(".spec.data.metadata.name")); - } - Some(VolumeMount { - mount_path: DEFAULT_DATA_DIR.to_owned(), - name, - ..VolumeMount::default() - }) - } else { - None - }; - let mut mounts = Vec::new(); - // check if the container has specified volume_mounts before - if let Some(spec_mounts) = container.volume_mounts { - // if the container mount the dir used in operator, return error - if spec_mounts - .iter() - .any(|mount| mount.mount_path.starts_with(DEFAULT_BACKUP_DIR)) - { - return Err(Error::CannotMount(DEFAULT_BACKUP_DIR)); - } - if spec_mounts - .iter() - .any(|mount| mount.mount_path.starts_with(DEFAULT_DATA_DIR)) - { - return Err(Error::CannotMount(DEFAULT_DATA_DIR)); - } - if spec_mounts - .iter() - .any(|mount| mount.name == DATA_EMPTY_DIR_NAME) - { - return Err(Error::InvalidVolumeName( - ".spec.container.volume_mounts[].name", - )); - } - // extend the mounts - mounts.extend(spec_mounts); - } - if let Some(mount) = data_mount { - mounts.push(mount); - } else { - // if data pv is not provided, then use emptyDir as volume - volumes = Some(vec![Volume { - name: DATA_EMPTY_DIR_NAME.to_owned(), - empty_dir: Some(EmptyDirVolumeSource::default()), - ..Volume::default() - }]); - mounts.push(VolumeMount { - mount_path: DEFAULT_DATA_DIR.to_owned(), - name: DATA_EMPTY_DIR_NAME.to_owned(), - ..VolumeMount::default() - }); - } - // override the container volume_mounts - container.volume_mounts = Some(mounts); - Ok((container, volumes)) - } - - /// Prepare container environment - fn prepare_container_env(mut container: Container) -> Container { - // to get pod unique name - let mut env = container.env.unwrap_or_default(); - env.push(EnvVar { - name: XLINE_POD_NAME_ENV.to_owned(), - value_from: Some(EnvVarSource { - field_ref: Some(ObjectFieldSelector { - field_path: "metadata.name".to_owned(), - ..ObjectFieldSelector::default() - }), - ..EnvVarSource::default() - }), - ..EnvVar::default() - }); - // override the pod environments - container.env = Some(env); - container - } - - /// Prepare container command - fn prepare_container_command( - &self, - mut container: Container, - namespace: &str, - name: &str, - size: i32, - xline_port: &ContainerPort, - ) -> Container { - // generate the members and setup xline in command line - let mut members = vec![]; - for i in 0..size { - members.push(format!( - "{name}-{i}={name}-{i}.{name}.{namespace}.svc.{}:{}", - self.cluster_suffix, xline_port.container_port - )); - } - // $(XLINE_POD_NAME_ENV) will read the pod name from environment - 
container.command = Some( - format!("xline --name $({XLINE_POD_NAME_ENV}) --storage-engine rocksdb --data-dir {DEFAULT_DATA_DIR} --members {}", members.join(",")) - .split_whitespace() - .map(ToOwned::to_owned) - .collect(), - ); - container - } - - /// Prepare the xline container provided by user - fn prepare_container( - &self, - namespace: &str, - name: &str, - cluster: &Arc, - xline_port: &ContainerPort, - ) -> Result<(Container, Option>)> { - let container = cluster.spec.container.clone(); - let (container, volumes) = Self::prepare_container_volume(cluster, container)?; - let container = Self::prepare_container_env(container); - let container = self.prepare_container_command( - container, - namespace, - name, - cluster.spec.size, - xline_port, - ); - Ok((container, volumes)) - } - - /// Apply the statefulset in k8s to reconcile cluster - async fn apply_statefulset( - &self, - namespace: &str, - name: &str, - cluster: &Arc, - xline_port: &ContainerPort, - pvcs: Vec, - metadata: &ObjectMeta, - ) -> Result<()> { - let api: Api = Api::namespaced(self.kube_client.clone(), namespace); - let (container, volumes) = self.prepare_container(namespace, name, cluster, xline_port)?; - let _: StatefulSet = api - .patch( - name, - &PatchParams::apply(FIELD_MANAGER), - &Patch::Apply(StatefulSet { - metadata: metadata.clone(), - spec: Some(StatefulSetSpec { - replicas: Some(cluster.spec.size), - selector: LabelSelector { - match_expressions: None, - match_labels: metadata.labels.clone(), - }, - service_name: name.to_owned(), - volume_claim_templates: Some(pvcs), - update_strategy: Some(StatefulSetUpdateStrategy { - rolling_update: Some(RollingUpdateStatefulSetStrategy { - max_unavailable: Some(IntOrString::String("50%".to_owned())), // allow a maximum of half the cluster quorum shutdown when performing a rolling update - partition: None, - }), - ..StatefulSetUpdateStrategy::default() - }), - template: PodTemplateSpec { - metadata: Some(ObjectMeta { - labels: metadata.labels.clone(), - ..ObjectMeta::default() - }), - spec: Some(PodSpec { - containers: vec![container], - volumes, - affinity: cluster.spec.affinity.clone(), - ..PodSpec::default() - }), - }, - ..StatefulSetSpec::default() - }), - ..StatefulSet::default() - }), - ) - .await?; - Ok(()) - } -} - -#[async_trait] -impl Controller for ClusterController { - type Error = Error; - type Metrics = ClusterMetrics; - - fn metrics(&self) -> &Self::Metrics { - &self.metrics - } - - async fn reconcile_once(&self, cluster: &Arc) -> Result<()> { - debug!( - "Reconciling cluster: \n{}", - serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() - ); - let (namespace, name) = Self::extract_id(cluster)?; - let owner_ref = Self::extract_owner_ref(cluster); - let pvcs = Self::extract_pvcs(cluster)?; - let (xline_port, service_ports) = Self::extract_ports(cluster); - let metadata = Self::build_metadata(namespace, name, owner_ref); - - self.apply_headless_service(namespace, name, &metadata, service_ports) - .await?; - self.apply_statefulset(namespace, name, cluster, &xline_port, pvcs, &metadata) - .await?; - Ok(()) - } - - fn handle_error(&self, resource: &Arc, err: &Self::Error) { - error!("{:?} reconciliation error: {}", resource.metadata.name, err); - } -} diff --git a/operator-k8s/src/controller/cluster/v1alpha1.rs b/operator-k8s/src/controller/cluster/v1alpha1.rs index 7458937e..4765f8cc 100644 --- a/operator-k8s/src/controller/cluster/v1alpha1.rs +++ b/operator-k8s/src/controller/cluster/v1alpha1.rs @@ -1,30 +1,19 @@ -use std::collections::BTreeMap; 
+use std::fmt::Debug; use std::sync::Arc; use async_trait::async_trait; -use k8s_openapi::api::apps::v1::{ - RollingUpdateStatefulSetStrategy, StatefulSet, StatefulSetSpec, StatefulSetUpdateStrategy, -}; -use k8s_openapi::api::batch::v1::{CronJob, CronJobSpec, JobSpec, JobTemplateSpec}; -use k8s_openapi::api::core::v1::{ - Container, ContainerPort, EmptyDirVolumeSource, EnvVar, EnvVarSource, ObjectFieldSelector, - PersistentVolumeClaim, PodSpec, PodTemplateSpec, Service, ServicePort, ServiceSpec, Volume, - VolumeMount, -}; -use k8s_openapi::apimachinery::pkg::apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference}; -use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; +use k8s_openapi::NamespaceResourceScope; use kube::api::{Patch, PatchParams}; -use kube::{Api, Client, Resource, ResourceExt}; +use kube::{Api, Client, Resource}; +use serde::de::DeserializeOwned; +use serde::Serialize; use tracing::{debug, error}; -use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; -use crate::consts::{ - CRONJOB_IMAGE, DATA_EMPTY_DIR_NAME, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, FIELD_MANAGER, - SIDECAR_PORT_NAME, XLINE_POD_NAME_ENV, XLINE_PORT_NAME, -}; +use crate::consts::FIELD_MANAGER; use crate::controller::cluster::ClusterMetrics; use crate::controller::{Controller, MetricsLabeled}; -use crate::crd::v1alpha1::{Cluster, StorageSpec}; +use crate::crd::v1alpha1::Cluster; +use crate::manager::cluster::Factory; /// CRD `XlineCluster` controller pub(crate) struct ClusterController { @@ -36,430 +25,43 @@ pub(crate) struct ClusterController { pub(crate) metrics: ClusterMetrics, } -/// All possible errors -#[derive(thiserror::Error, Debug)] -pub(crate) enum Error { - /// Missing an object in cluster - #[error("Missing object key {0} in cluster")] - MissingObject(&'static str), - /// Kube error - #[error("Kubernetes api error")] - Kube(#[from] kube::Error), - /// Backup PV mount path is already mounted - #[error("The path {0} is internally used in the xline operator and cannot be mounted.")] - CannotMount(&'static str), - /// Volume(PVC) name conflict with `DATA_EMPTY_DIR_NAME` - #[error("The {0} is conflict with the name internally used in the xline operator")] - InvalidVolumeName(&'static str), -} - -impl MetricsLabeled for Error { +impl MetricsLabeled for kube::Error { fn labels(&self) -> Vec<&str> { + #[allow(clippy::wildcard_enum_match_arm)] // the reason is enough match *self { - Self::MissingObject(_) => vec!["missing_object"], - Self::Kube(_) => vec!["kube"], - Self::CannotMount(_) => vec!["cannot_mount"], - Self::InvalidVolumeName(_) => vec!["invalid_volume_name"], + Self::Api(_) => vec!["api error"], + Self::Service(_) => vec!["service error"], + Self::FromUtf8(_) | Self::SerdeError(_) => vec!["encode/decode error"], + Self::Auth(_) => vec!["authorization error"], + Self::OpensslTls(_) => vec!["tls error"], + Self::HyperError(_) | Self::HttpError(_) => vec!["http error"], + _ => vec!["unknown"], } } } /// Controller result -type Result = std::result::Result; +type Result = std::result::Result; impl ClusterController { - /// Extract ports - fn extract_ports(cluster: &Arc) -> (ContainerPort, ContainerPort, Vec) { - // expose all the container's ports - let mut xline_port = None; - let mut sidecar_port = None; - let container_ports = cluster.spec.container.ports.clone().unwrap_or_default(); - let mut service_ports: Vec<_> = container_ports - .into_iter() - .map(|port| { - // the port with name `xline` is considered to be the port of xline - if matches!(port.name.as_deref(), 
Some(XLINE_PORT_NAME)) { - xline_port = Some(port.clone()); - } - // the port with name `sidecar` is considered to be the port of xline - if matches!(port.name.as_deref(), Some(SIDECAR_PORT_NAME)) { - sidecar_port = Some(port.clone()); - } - ServicePort { - name: port.name.clone(), - port: port.container_port, - ..ServicePort::default() - } - }) - .collect(); - if xline_port.is_none() { - // add default xline port 2379 to service port if xline port is not specified - service_ports.push(ServicePort { - name: Some(XLINE_PORT_NAME.to_owned()), - port: DEFAULT_XLINE_PORT, - ..ServicePort::default() - }); - } - if sidecar_port.is_none() { - // add default sidecar port 2380 to service port if sidecar port is not specified - service_ports.push(ServicePort { - name: Some(SIDECAR_PORT_NAME.to_owned()), - port: DEFAULT_SIDECAR_PORT, - ..ServicePort::default() - }); - } - // if it is not specified, 2379 is used as xline port - let xline_port = xline_port.unwrap_or(ContainerPort { - name: Some(XLINE_PORT_NAME.to_owned()), - container_port: DEFAULT_XLINE_PORT, - ..ContainerPort::default() - }); - // if it is not specified, 2380 is used as sidecar port - let sidecar_port = sidecar_port.unwrap_or(ContainerPort { - name: Some(SIDECAR_PORT_NAME.to_owned()), - container_port: DEFAULT_SIDECAR_PORT, - ..ContainerPort::default() - }); - (xline_port, sidecar_port, service_ports) - } - - /// Extract persistent volume claims - fn extract_pvcs(cluster: &Arc) -> Result> { - let mut pvcs = Vec::new(); - // check if the backup type is PV, add the pvc to pvcs - if let Some(spec) = cluster.spec.backup.as_ref() { - if let StorageSpec::Pvc { pvc } = spec.storage.clone() { - pvcs.push(pvc); - } - } - // check if the data pvc if specified, add the pvc to pvcs - if let Some(pvc) = cluster.spec.data.as_ref() { - pvcs.push(pvc.clone()); - } - // extend the user defined pvcs - if let Some(spec_pvcs) = cluster.spec.pvcs.clone() { - if spec_pvcs - .iter() - .any(|pvc| pvc.name_any() == DATA_EMPTY_DIR_NAME) - { - return Err(Error::InvalidVolumeName(".spec.pvcs[].metadata.name")); - } - pvcs.extend(spec_pvcs); - } - Ok(pvcs) - } - - /// Extract owner reference - fn extract_owner_ref(cluster: &Arc) -> OwnerReference { - // unwrap controller_owner_ref is always safe - let Some(owner_ref) = cluster.controller_owner_ref(&()) else { unreachable!("kube-runtime has undergone some changes.") }; - owner_ref - } - - /// Extract name, namespace - fn extract_id(cluster: &Arc) -> Result<(&str, &str)> { - let namespace = cluster - .metadata - .namespace - .as_deref() - .ok_or(Error::MissingObject(".metadata.namespace"))?; - let name = cluster - .metadata - .name - .as_deref() - .ok_or(Error::MissingObject(".metadata.name"))?; - Ok((namespace, name)) - } - - /// Build the metadata which shares between all subresources - fn build_metadata(namespace: &str, name: &str, owner_ref: OwnerReference) -> ObjectMeta { - let mut labels: BTreeMap = BTreeMap::new(); - let _: Option<_> = labels.insert("app".to_owned(), name.to_owned()); - ObjectMeta { - labels: Some(labels.clone()), // it is used in selector - name: Some(name.to_owned()), // all subresources share the same name - namespace: Some(namespace.to_owned()), // all subresources share the same namespace - owner_references: Some(vec![owner_ref]), // allow k8s GC to automatically clean up itself - ..ObjectMeta::default() - } - } - - /// Apply headless service - async fn apply_headless_service( - &self, - namespace: &str, - name: &str, - metadata: &ObjectMeta, - service_ports: Vec, - ) -> Result<()> { - 
let api: Api = Api::namespaced(self.kube_client.clone(), namespace); - let _: Service = api - .patch( - name, - &PatchParams::apply(FIELD_MANAGER), - &Patch::Apply(Service { - metadata: metadata.clone(), - spec: Some(ServiceSpec { - cluster_ip: None, - ports: Some(service_ports), - selector: metadata.labels.clone(), - ..ServiceSpec::default() - }), - ..Service::default() - }), - ) - .await?; - Ok(()) - } - - /// Prepare container volume - fn prepare_container_volume( - cluster: &Arc, - mut container: Container, - ) -> Result<(Container, Option>)> { - let backup = cluster.spec.backup.clone(); - let data = cluster.spec.data.clone(); - let mut volumes = None; - // mount backup volume to `DEFAULT_BACKUP_PV_MOUNT_PATH` in container - let backup_mount = if let Some(spec) = backup { - let backup_pvc_name = match spec.storage { - StorageSpec::S3 { .. } => None, - StorageSpec::Pvc { pvc } => Some( - pvc.metadata - .name - .ok_or(Error::MissingObject(".spec.backup.pvc.metadata.name"))?, - ), - }; - if let Some(pvc_name) = backup_pvc_name { - if pvc_name == DATA_EMPTY_DIR_NAME { - return Err(Error::InvalidVolumeName(".spec.backup.metadata.name")); - } - Some(VolumeMount { - mount_path: DEFAULT_BACKUP_DIR.to_owned(), - name: pvc_name, - ..VolumeMount::default() - }) - } else { - None - } - } else { - None - }; - // mount data volume to `DEFAULT_DATA_DIR` in container - let data_mount = if let Some(pvc) = data { - let name = pvc - .metadata - .name - .ok_or(Error::MissingObject(".spec.data.metadata.name"))?; - if name == DATA_EMPTY_DIR_NAME { - return Err(Error::InvalidVolumeName(".spec.data.metadata.name")); - } - Some(VolumeMount { - mount_path: DEFAULT_DATA_DIR.to_owned(), - name, - ..VolumeMount::default() - }) - } else { - None - }; - let mut mounts = Vec::new(); - // check if the container has specified volume_mounts before - if let Some(spec_mounts) = container.volume_mounts { - // if the container mount the dir used in operator, return error - if spec_mounts - .iter() - .any(|mount| mount.mount_path.starts_with(DEFAULT_BACKUP_DIR)) - { - return Err(Error::CannotMount(DEFAULT_BACKUP_DIR)); - } - if spec_mounts - .iter() - .any(|mount| mount.mount_path.starts_with(DEFAULT_DATA_DIR)) - { - return Err(Error::CannotMount(DEFAULT_DATA_DIR)); - } - if spec_mounts - .iter() - .any(|mount| mount.name == DATA_EMPTY_DIR_NAME) - { - return Err(Error::InvalidVolumeName( - ".spec.container.volume_mounts[].name", - )); - } - // extend the mounts - mounts.extend(spec_mounts); - } - if let Some(mount) = backup_mount { - mounts.push(mount); - } - if let Some(mount) = data_mount { - mounts.push(mount); - } else { - // if data pv is not provided, then use emptyDir as volume - volumes = Some(vec![Volume { - name: DATA_EMPTY_DIR_NAME.to_owned(), - empty_dir: Some(EmptyDirVolumeSource::default()), - ..Volume::default() - }]); - mounts.push(VolumeMount { - mount_path: DEFAULT_DATA_DIR.to_owned(), - name: DATA_EMPTY_DIR_NAME.to_owned(), - ..VolumeMount::default() - }); - } - // override the container volume_mounts - container.volume_mounts = Some(mounts); - Ok((container, volumes)) - } - - /// Prepare container environment - fn prepare_container_env(mut container: Container) -> Container { - // to get pod unique name - let mut env = container.env.unwrap_or_default(); - env.push(EnvVar { - name: XLINE_POD_NAME_ENV.to_owned(), - value_from: Some(EnvVarSource { - field_ref: Some(ObjectFieldSelector { - field_path: "metadata.name".to_owned(), - ..ObjectFieldSelector::default() - }), - ..EnvVarSource::default() - }), - 
..EnvVar::default() - }); - // override the pod environments - container.env = Some(env); - container - } - - /// Prepare container command - fn prepare_container_command(mut container: Container) -> Container { - // the main command should wait forever so that the sidecar could always contact the xline container - // so we use `tail -F /dev/null` here - container.command = Some( - "tail -F /dev/null" - .split_whitespace() - .map(ToOwned::to_owned) - .collect(), - ); - container - } - - /// Prepare the xline container provided by user - fn prepare_container(cluster: &Arc) -> Result<(Container, Option>)> { - let container = cluster.spec.container.clone(); - let (container, volumes) = Self::prepare_container_volume(cluster, container)?; - let container = Self::prepare_container_env(container); - let container = Self::prepare_container_command(container); - Ok((container, volumes)) - } - - /// Apply the statefulset in k8s to reconcile cluster - async fn apply_statefulset( + /// Apply resource + #[allow(clippy::expect_used)] // use expect rather than unwrap_or_else(|| unreachable()) + async fn apply_resource>( &self, - namespace: &str, - name: &str, - cluster: &Arc, - pvcs: Vec, - metadata: &ObjectMeta, - ) -> Result<()> { - let api: Api = Api::namespaced(self.kube_client.clone(), namespace); - let (container, volumes) = Self::prepare_container(cluster)?; - let _: StatefulSet = api + res: R, + ) -> Result<()> + where + R: Clone + DeserializeOwned + Debug + Serialize, + R::DynamicType: Default, + { + let namespace = res.meta().namespace.as_deref().expect("require namespace"); + let name = res.meta().name.clone().expect("require name"); + let api: Api = Api::namespaced(self.kube_client.clone(), namespace); + _ = api .patch( - name, + &name, &PatchParams::apply(FIELD_MANAGER), - &Patch::Apply(StatefulSet { - metadata: metadata.clone(), - spec: Some(StatefulSetSpec { - replicas: Some(cluster.spec.size), - selector: LabelSelector { - match_expressions: None, - match_labels: metadata.labels.clone(), - }, - service_name: name.to_owned(), - volume_claim_templates: Some(pvcs), - update_strategy: Some(StatefulSetUpdateStrategy { - rolling_update: Some(RollingUpdateStatefulSetStrategy { - max_unavailable: Some(IntOrString::String("50%".to_owned())), // allow a maximum of half the cluster quorum shutdown when performing a rolling update - partition: None, - }), - ..StatefulSetUpdateStrategy::default() - }), - template: PodTemplateSpec { - metadata: Some(ObjectMeta { - labels: metadata.labels.clone(), - ..ObjectMeta::default() - }), - spec: Some(PodSpec { - affinity: cluster.spec.affinity.clone(), - init_containers: Some(vec![]), // TODO publish sidecar operator to registry - containers: vec![container], // TODO inject the sidecar operator container here - volumes, - ..PodSpec::default() - }), - }, - ..StatefulSetSpec::default() - }), - ..StatefulSet::default() - }), - ) - .await?; - Ok(()) - } - - /// Apply the cron job to trigger backup - async fn apply_backup_cron_job( - &self, - namespace: &str, - name: &str, - size: i32, - cron: &str, - sidecar_port: &ContainerPort, - metadata: &ObjectMeta, - ) -> Result<()> { - let api: Api = Api::namespaced(self.kube_client.clone(), namespace); - let trigger_cmd = vec![ - "/bin/sh".to_owned(), - "-ecx".to_owned(), - format!( - "curl {name}-$((RANDOM % {size})).{name}.{namespace}.svc.{}:{}/backup", - self.cluster_suffix, sidecar_port.container_port - ), // choose a node randomly - ]; - let _: CronJob = api - .patch( - name, - &PatchParams::apply(FIELD_MANAGER), - 
&Patch::Apply(CronJob { - metadata: metadata.clone(), - spec: Some(CronJobSpec { - concurrency_policy: Some("Forbid".to_owned()), // A backup cron job cannot run concurrently - schedule: cron.to_owned(), - job_template: JobTemplateSpec { - spec: Some(JobSpec { - template: PodTemplateSpec { - spec: Some(PodSpec { - containers: vec![Container { - name: format!("{name}-backup-cronjob"), - image_pull_policy: Some("IfNotPresent".to_owned()), - image: Some(CRONJOB_IMAGE.to_owned()), - command: Some(trigger_cmd), - ..Container::default() - }], - restart_policy: Some("OnFailure".to_owned()), - ..PodSpec::default() - }), - ..PodTemplateSpec::default() - }, - ..JobSpec::default() - }), - ..JobTemplateSpec::default() - }, - ..CronJobSpec::default() - }), - ..CronJob::default() - }), + &Patch::Apply(res), ) .await?; Ok(()) @@ -468,7 +70,7 @@ impl ClusterController { #[async_trait] impl Controller for ClusterController { - type Error = Error; + type Error = kube::Error; type Metrics = ClusterMetrics; fn metrics(&self) -> &Self::Metrics { @@ -480,28 +82,12 @@ impl Controller for ClusterController { "Reconciling cluster: \n{}", serde_json::to_string_pretty(cluster.as_ref()).unwrap_or_default() ); - let (namespace, name) = Self::extract_id(cluster)?; - let owner_ref = Self::extract_owner_ref(cluster); - let pvcs = Self::extract_pvcs(cluster)?; - let (_xline_port, sidecar_port, service_ports) = Self::extract_ports(cluster); - let metadata = Self::build_metadata(namespace, name, owner_ref); + let factory = Factory::new(Arc::clone(cluster), &self.cluster_suffix); - self.apply_headless_service(namespace, name, &metadata, service_ports) - .await?; - self.apply_statefulset(namespace, name, cluster, pvcs, &metadata) - .await?; + self.apply_resource(factory.node_service()).await?; + // TODO wait service ready + self.apply_resource(factory.sts()).await?; - if let Some(spec) = cluster.spec.backup.as_ref() { - Box::pin(self.apply_backup_cron_job( - namespace, - name, - cluster.spec.size, - spec.cron.as_str(), - &sidecar_port, - &metadata, - )) - .await?; - } Ok(()) } diff --git a/operator-k8s/src/crd/mod.rs b/operator-k8s/src/crd/mod.rs index 87910eb4..4a403a89 100644 --- a/operator-k8s/src/crd/mod.rs +++ b/operator-k8s/src/crd/mod.rs @@ -1,10 +1,3 @@ -/// v1alpha -/// Features: -/// 1. Basic deployment -/// 2. Scale cluster -/// 3. Xline data PV -pub(crate) mod v1alpha; - /// v1alpha1 /// Features: /// 1. Xline sidecar diff --git a/operator-k8s/src/crd/v1alpha/cluster.rs b/operator-k8s/src/crd/v1alpha/cluster.rs deleted file mode 100644 index c31cb5fb..00000000 --- a/operator-k8s/src/crd/v1alpha/cluster.rs +++ /dev/null @@ -1,91 +0,0 @@ -// The `JsonSchema` and `CustomResource` macro generates codes that does not pass the clippy lint. 
-#![allow(clippy::str_to_string)] -#![allow(clippy::missing_docs_in_private_items)] - -#[cfg(test)] -use garde::Validate; -use k8s_openapi::api::core::v1::{Affinity, Container, PersistentVolumeClaim}; -use k8s_openapi::serde::{Deserialize, Serialize}; -use kube::CustomResource; -use schemars::JsonSchema; - -/// Xline cluster specification -#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)] -#[cfg_attr(test, derive(Validate))] -#[kube( - group = "xlineoperator.xline.cloud", - version = "v1alpha", - kind = "XlineCluster", - singular = "xlinecluster", - plural = "xlineclusters", - struct = "Cluster", - namespaced, - status = "ClusterStatus", - shortname = "xc", - scale = r#"{"specReplicasPath":".spec.size", "statusReplicasPath":".status.available"}"#, - printcolumn = r#"{"name":"Size", "type":"string", "description":"The cluster size", "jsonPath":".spec.size"}"#, - printcolumn = r#"{"name":"Age", "type":"date", "description":"The cluster age", "jsonPath":".metadata.creationTimestamp"}"# -)] -pub(crate) struct ClusterSpec { - /// Size of the xline cluster, less than 3 is not allowed - #[cfg_attr(test, garde(range(min = 3)))] - #[schemars(range(min = 3))] - pub(crate) size: i32, - /// Xline container specification - #[cfg_attr(test, garde(skip))] - pub(crate) container: Container, - /// The affinity of the xline node - #[cfg_attr(test, garde(skip))] - #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) affinity: Option, - /// The data PVC, if it is not specified, then use emptyDir instead - #[cfg_attr(test, garde(skip))] - #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) data: Option, - /// Some user defined persistent volume claim templates - #[cfg_attr(test, garde(skip))] - #[serde(skip_serializing_if = "Option::is_none")] - pub(crate) pvcs: Option>, -} - -/// Xline cluster status -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Default)] -pub(crate) struct ClusterStatus { - /// The available nodes' number in the cluster - pub(crate) available: i32, -} - -#[cfg(test)] -mod test { - use garde::Validate; - use k8s_openapi::api::core::v1::Container; - - use super::ClusterSpec; - - #[test] - fn validation_ok() { - let ok = ClusterSpec { - size: 3, - container: Container::default(), - affinity: None, - pvcs: None, - data: None, - }; - assert!(Validate::validate(&ok, &()).is_ok()); - } - - #[test] - fn validation_bad_size() { - let bad_size = ClusterSpec { - size: 1, - container: Container::default(), - affinity: None, - pvcs: None, - data: None, - }; - assert_eq!( - Validate::validate(&bad_size, &()).unwrap_err().flatten()[0].0, - "value.size" - ); - } -} diff --git a/operator-k8s/src/crd/v1alpha/mod.rs b/operator-k8s/src/crd/v1alpha/mod.rs deleted file mode 100644 index b0a1a7d0..00000000 --- a/operator-k8s/src/crd/v1alpha/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub(crate) use cluster::Cluster; - -mod cluster; diff --git a/operator-k8s/src/crd/v1alpha1/mod.rs b/operator-k8s/src/crd/v1alpha1/mod.rs index c2343934..b0a1a7d0 100644 --- a/operator-k8s/src/crd/v1alpha1/mod.rs +++ b/operator-k8s/src/crd/v1alpha1/mod.rs @@ -1,5 +1,3 @@ -#![allow(unused)] // TODO: remove when this CRD is used - -pub(crate) use cluster::{BackupSpec, Cluster, ClusterSpec, StorageSpec}; +pub(crate) use cluster::Cluster; mod cluster; diff --git a/operator-k8s/src/manager/cluster.rs b/operator-k8s/src/manager/cluster.rs index 5d38ac7b..ee63d545 100644 --- a/operator-k8s/src/manager/cluster.rs +++ b/operator-k8s/src/manager/cluster.rs @@ -1,24 +1,24 @@ 
-#![allow(unused)] // remove when implemented - -use crate::consts::{ - ANNOTATION_INHERIT_LABELS_PREFIX, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, - LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, LABEL_OPERATOR_VERSION, SIDECAR_PORT_NAME, - XLINE_POD_NAME_ENV, XLINE_PORT_NAME, -}; -use crate::crd::v1alpha1::{BackupSpec, Cluster, ClusterSpec, StorageSpec}; +#![allow(unused)] // TODO remove use std::collections::BTreeMap; use std::sync::Arc; +use k8s_openapi::api::apps::v1::{StatefulSet, StatefulSetSpec}; use k8s_openapi::api::core::v1::{ - Container, ContainerPort, EnvVar, EnvVarSource, GRPCAction, ObjectFieldSelector, - PersistentVolumeClaim, PersistentVolumeClaimVolumeSource, Pod, PodSpec, PodTemplateSpec, Probe, - Service, ServicePort, ServiceSpec, Volume, VolumeMount, + Container, ContainerPort, EnvVar, EnvVarSource, ObjectFieldSelector, PersistentVolumeClaim, + PodSpec, PodTemplateSpec, Service, ServicePort, ServiceSpec, VolumeMount, }; -use k8s_openapi::apimachinery::pkg::apis::meta::v1::{ObjectMeta, OwnerReference}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference}; use kube::{Resource, ResourceExt}; use utils::consts::{DEFAULT_BACKUP_DIR, DEFAULT_DATA_DIR}; +use crate::consts::{ + ANNOTATION_INHERIT_LABELS_PREFIX, DEFAULT_SIDECAR_PORT, DEFAULT_XLINE_PORT, + LABEL_CLUSTER_COMPONENT, LABEL_CLUSTER_NAME, LABEL_OPERATOR_VERSION, SIDECAR_PORT_NAME, + XLINE_POD_NAME_ENV, XLINE_PORT_NAME, +}; +use crate::crd::v1alpha1::Cluster; + /// Read objects from `XlineCluster` pub(crate) struct Extractor<'a> { /// `XlineCluster` @@ -29,7 +29,7 @@ pub(crate) struct Extractor<'a> { #[derive(Copy, Clone)] pub(crate) enum Component { /// A xline node - Node, + Nodes, /// A service Service, /// A backup job @@ -40,7 +40,7 @@ impl Component { /// Get the component name fn label(&self) -> &str { match *self { - Component::Node => "node", + Component::Nodes => "nodes", Component::Service => "srv", Component::BackupJob => "job", } @@ -59,7 +59,7 @@ impl<'a> Extractor<'a> { /// If the `XlineCluster` does not specified the xline ports (a port with name 'xline') or /// the sidecar ports (a port with name 'sidecar'), the default port (xline: 2379, sidecar: 2380) /// will be used. 
- fn extract_ports(&self) -> (ContainerPort, ContainerPort, Vec) { + pub(crate) fn extract_ports(&self) -> (ContainerPort, ContainerPort, Vec) { // expose all the container's ports let mut xline_port = None; let mut sidecar_port = None; @@ -121,7 +121,7 @@ impl<'a> Extractor<'a> { /// Extract all PVC templates /// The PVC template is used to create PVC for every pod - fn extract_pvc_template(&self) -> Vec { + pub(crate) fn extract_pvc_template(&self) -> Vec { self.cluster .spec .backup @@ -163,7 +163,6 @@ impl<'a> Extractor<'a> { /// Extract owner reference #[allow(clippy::expect_used)] // it is ok because xlinecluster always populated from the apiserver fn extract_owner_ref(&self) -> OwnerReference { - // unwrap controller_owner_ref is always safe self.cluster .controller_owner_ref(&()) .expect("metadata doesn't have name or uid") @@ -171,7 +170,7 @@ impl<'a> Extractor<'a> { /// Extract name, namespace #[allow(clippy::expect_used)] // it is ok because xlinecluster has field validation - fn extract_id(&self) -> (&str, &str) { + pub(crate) fn extract_id(&self) -> (&str, &str) { let name = self .cluster .metadata @@ -235,17 +234,24 @@ impl Factory { format!("{cluster_name}-{}", component.label()) } + /// Get the selector labels + fn selector_labels(name: &str, component: Component) -> BTreeMap { + BTreeMap::from([ + (LABEL_CLUSTER_NAME.to_owned(), name.to_owned()), + ( + LABEL_CLUSTER_COMPONENT.to_owned(), + component.label().to_owned(), + ), + ]) + } + /// Get the general metadata fn general_metadata(&self, component: Component) -> ObjectMeta { let extractor = Extractor::new(self.cluster.as_ref()); let mut labels = extractor.extract_inherit_labels(); let (name, namespace) = extractor.extract_id(); let owner_ref = extractor.extract_owner_ref(); - _ = labels.insert(LABEL_CLUSTER_NAME.to_owned(), name.to_owned()); - _ = labels.insert( - LABEL_CLUSTER_COMPONENT.to_owned(), - component.label().to_owned(), - ); + labels.extend(Self::selector_labels(name, component)); _ = labels.insert( LABEL_OPERATOR_VERSION.to_owned(), env!("CARGO_PKG_VERSION").to_owned(), @@ -260,7 +266,7 @@ impl Factory { } /// Get the node headless service - fn node_service(&self) -> Service { + pub(crate) fn node_service(&self) -> Service { let extractor = Extractor::new(self.cluster.as_ref()); let (_, _, service_ports) = extractor.extract_ports(); let (name, _) = extractor.extract_id(); @@ -274,7 +280,7 @@ impl Factory { (LABEL_CLUSTER_NAME.to_owned(), name.to_owned()), ( LABEL_CLUSTER_COMPONENT.to_owned(), - Component::Node.label().to_owned(), + Component::Nodes.label().to_owned(), ), ] .into(), @@ -286,7 +292,6 @@ impl Factory { } /// Mount the additional volumes on the container - #[allow(clippy::unused_self)] fn mount_volume_on_container(&self, container: &mut Container) { let extractor = Extractor::new(self.cluster.as_ref()); let volume_mount = extractor.extract_additional_volume_mount(); @@ -297,14 +302,15 @@ impl Factory { } /// Set the entrypoint of the container - fn set_command(&self, container: &mut Container, size: usize) { + fn set_command(&self, container: &mut Container) { + let size = self.cluster.spec.size; let extractor = Extractor::new(self.cluster.as_ref()); let (name, namespace) = extractor.extract_id(); let (xline_port, _, _) = extractor.extract_ports(); let srv_name = Self::component_name(name, Component::Service); let mut members = vec![]; for i in 0..=size { - let node_name = format!("{}-{i}", Self::component_name(name, Component::Node)); + let node_name = format!("{}-{i}", 
Self::component_name(name, Component::Nodes)); members.push(format!( "{node_name}={node_name}.{srv_name}.{namespace}.svc.{}:{}", self.cluster_suffix, xline_port.container_port @@ -320,10 +326,10 @@ impl Factory { } /// Get the xline container - fn xline_container(&self, size: usize) -> Container { + fn xline_container(&self) -> Container { let mut container = self.cluster.spec.container.clone(); self.mount_volume_on_container(&mut container); - self.set_command(&mut container, size); + self.set_command(&mut container); // we need to set the env variable to get the pod name in the container container.env = Some(vec![EnvVar { name: XLINE_POD_NAME_ENV.to_owned(), @@ -340,12 +346,16 @@ impl Factory { } /// Get the node pod - fn pod_spec(&self, size: usize) -> PodTemplateSpec { + pub(crate) fn pod_spec(&self) -> PodTemplateSpec { let extractor = Extractor::new(self.cluster.as_ref()); let (name, _) = extractor.extract_id(); - let xline = self.xline_container(size); + let xline = self.xline_container(); + let labels = Self::selector_labels(name, Component::Nodes); PodTemplateSpec { - metadata: Some(self.general_metadata(Component::Node)), + metadata: Some(ObjectMeta { + labels: Some(labels), + ..ObjectMeta::default() + }), spec: Some(PodSpec { init_containers: Some(vec![]), containers: vec![xline], @@ -354,13 +364,34 @@ impl Factory { }), } } + + /// Get the statefulset + pub(crate) fn sts(&self) -> StatefulSet { + let size = self.cluster.spec.size; + let extractor = Extractor::new(self.cluster.as_ref()); + let (name, _) = extractor.extract_id(); + let labels = Self::selector_labels(name, Component::Nodes); + StatefulSet { + metadata: self.general_metadata(Component::Nodes), + spec: Some(StatefulSetSpec { + replicas: Some(size), + selector: LabelSelector { + match_expressions: None, + match_labels: Some(labels), + }, + service_name: Self::component_name(name, Component::Service), + volume_claim_templates: Some(extractor.extract_pvc_template()), + template: self.pod_spec(), + ..StatefulSetSpec::default() + }), + status: None, + } + } } #[cfg(test)] mod tests { use super::*; - use k8s_openapi::api::core::v1::{Affinity, NodeAffinity, PersistentVolumeClaimSpec}; - use kube::CustomResourceExt; static CLUSTER_1: &str = r#" apiVersion: xlineoperator.xline.cloud/v1alpha @@ -586,8 +617,8 @@ spec: #[test] fn factory_component_name_should_work() { assert_eq!( - Factory::component_name("my-xline-cluster", Component::Node), - "my-xline-cluster-node" + Factory::component_name("my-xline-cluster", Component::Nodes), + "my-xline-cluster-nodes" ); assert_eq!( Factory::component_name("my-xline-cluster", Component::Service), @@ -598,4 +629,79 @@ spec: "my-xline-cluster-job" ); } + + #[test] + fn factory_general_metadata_should_work() { + let cluster_1_metadata = r#" +labels: + app: my-xline-cluster + appNamespace: default + xlinecluster/component: nodes + xlinecluster/name: my-xline-cluster + xlinecluster/operator-version: 0.1.0 +name: my-xline-cluster-nodes +namespace: default +ownerReferences: +- apiVersion: xlineoperator.xline.cloud/v1alpha1 + controller: true + kind: XlineCluster + name: my-xline-cluster + uid: this-is-a-random-uid + "# + .trim(); + + let cluster_other_metadata = r#" +labels: + xlinecluster/component: nodes + xlinecluster/name: my-xline-cluster + xlinecluster/operator-version: 0.1.0 +name: my-xline-cluster-nodes +namespace: default +ownerReferences: +- apiVersion: xlineoperator.xline.cloud/v1alpha1 + controller: true + kind: XlineCluster + name: my-xline-cluster + uid: this-is-a-random-uid + "# 
+ .trim(); + + for (cluster_raw, metadata_str) in [ + (CLUSTER_1, cluster_1_metadata), + (CLUSTER_2, cluster_other_metadata), + (CLUSTER_3, cluster_other_metadata), + (CLUSTER_4, cluster_other_metadata), + ] { + let mut cluster: Cluster = serde_yaml::from_str(cluster_raw).unwrap(); + after_apiserver(&mut cluster); + let factory = Factory::new(Arc::new(cluster), "cluster.local"); + let metadata = factory.general_metadata(Component::Nodes); + let outputs = serde_yaml::to_string(&metadata).unwrap(); + assert_eq!(outputs.trim(), metadata_str); + } + } + + #[test] + fn factory_node_service_should_work() { + let spec = r#" +spec: + ports: + - name: xline + port: 2379 + - name: sidecar + port: 2380 + selector: + xlinecluster/component: nodes + xlinecluster/name: my-xline-cluster + "# + .trim(); + for cluster_raw in [CLUSTER_1, CLUSTER_3, CLUSTER_4] { + let mut cluster: Cluster = serde_yaml::from_str(cluster_raw).unwrap(); + after_apiserver(&mut cluster); + let factory = Factory::new(Arc::new(cluster), "cluster.local"); + let service = factory.node_service(); + let outputs = serde_yaml::to_string(&service).unwrap(); + assert!(outputs.contains(spec)); + } + } } diff --git a/operator-k8s/src/manager/mod.rs b/operator-k8s/src/manager/mod.rs index d273d9a2..ed8a5d29 100644 --- a/operator-k8s/src/manager/mod.rs +++ b/operator-k8s/src/manager/mod.rs @@ -1,2 +1,2 @@ /// `XlineCluster` manager -mod cluster; +pub(crate) mod cluster; diff --git a/tests/e2e/cases/ci.sh b/tests/e2e/cases/ci.sh index 968a6b82..9e303b83 100644 --- a/tests/e2e/cases/ci.sh +++ b/tests/e2e/cases/ci.sh @@ -8,12 +8,13 @@ _TEST_CI_OPERATOR_NAME="my-xline-operator" _TEST_CI_DNS_SUFFIX="cluster.local" _TEST_CI_NAMESPACE="default" _TEST_CI_XLINE_PORT="2379" -_TEST_CI_LOG_SYNC_TIMEOUT=60 +_TEST_CI_LOG_SYNC_TIMEOUT=30 +_TEST_CI_START_SIZE=3 function test::ci::_mk_endpoints() { - local endpoints="${_TEST_CI_CLUSTER_NAME}-0.${_TEST_CI_CLUSTER_NAME}.${_TEST_CI_NAMESPACE}.svc.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}" + local endpoints="${_TEST_CI_CLUSTER_NAME}-nodes-0.${_TEST_CI_CLUSTER_NAME}-srv.${_TEST_CI_NAMESPACE}.svc.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}" for ((i = 1; i < $1; i++)); do - endpoints="${endpoints},${_TEST_CI_CLUSTER_NAME}-${i}.${_TEST_CI_CLUSTER_NAME}.${_TEST_CI_NAMESPACE}.svc.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}" + endpoints="${endpoints},${_TEST_CI_CLUSTER_NAME}-nodes-${i}.${_TEST_CI_CLUSTER_NAME}-srv.${_TEST_CI_NAMESPACE}.svc.${_TEST_CI_DNS_SUFFIX}:${_TEST_CI_XLINE_PORT}" done echo "$endpoints" } @@ -39,9 +40,9 @@ function test::ci::_start() { k8s::kubectl wait --for=condition=available deployment/$_TEST_CI_OPERATOR_NAME --timeout=300s >/dev/null 2>&1 k8s::kubectl::wait_resource_creation crd xlineclusters.xlineoperator.xline.cloud k8s::kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/manifests/cluster.yml" >/dev/null 2>&1 - k8s::kubectl::wait_resource_creation sts $_TEST_CI_CLUSTER_NAME - k8s::kubectl wait --for=jsonpath='{.status.updatedReplicas}'=3 sts/$_TEST_CI_CLUSTER_NAME --timeout=300s >/dev/null 2>&1 - k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'=3 sts/$_TEST_CI_CLUSTER_NAME --timeout=300s >/dev/null 2>&1 + k8s::kubectl::wait_resource_creation sts "${_TEST_CI_CLUSTER_NAME}-nodes" + k8s::kubectl wait --for=jsonpath='{.status.updatedReplicas}'=$_TEST_CI_START_SIZE sts "${_TEST_CI_CLUSTER_NAME}-nodes" --timeout=300s >/dev/null 2>&1 + k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'=$_TEST_CI_START_SIZE sts "${_TEST_CI_CLUSTER_NAME}-nodes" --timeout=300s >/dev/null 2>&1 
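For reference, the endpoints assembled by test::ci::_mk_endpoints above follow the naming scheme introduced by the Factory changes in this patch: each pod `<cluster>-nodes-<i>` is resolved through the headless service `<cluster>-srv`. Below is a minimal Rust sketch of that scheme, assuming the e2e defaults used elsewhere in this patch (cluster `my-xline-cluster`, namespace `default`, DNS suffix `cluster.local`, xline port 2379); `member_endpoint` is an illustrative helper, not part of the operator code.

// Illustrative sketch only (not part of this patch): shows the member endpoint format
// `<cluster>-nodes-<i>.<cluster>-srv.<namespace>.svc.<dns-suffix>:<port>` that both
// Factory::set_command and the e2e script rely on.
fn member_endpoint(cluster: &str, namespace: &str, dns_suffix: &str, port: u16, index: usize) -> String {
    format!("{cluster}-nodes-{index}.{cluster}-srv.{namespace}.svc.{dns_suffix}:{port}")
}

fn main() {
    // Prints: my-xline-cluster-nodes-0.my-xline-cluster-srv.default.svc.cluster.local:2379
    println!(
        "{}",
        member_endpoint("my-xline-cluster", "default", "cluster.local", 2379, 0)
    );
}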
log::info "cluster started" } @@ -56,8 +57,8 @@ function test::ci::_teardown() { function test::ci::_scale_cluster() { log::info "scaling cluster to $1" k8s::kubectl scale xc $_TEST_CI_CLUSTER_NAME --replicas="$1" >/dev/null 2>&1 - k8s::kubectl wait --for=jsonpath='{.status.updatedReplicas}'="$1" sts/$_TEST_CI_CLUSTER_NAME --timeout=300s >/dev/null 2>&1 - k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'="$1" sts/$_TEST_CI_CLUSTER_NAME --timeout=300s >/dev/null 2>&1 + k8s::kubectl wait --for=jsonpath='{.status.updatedReplicas}'="$1" sts "${_TEST_CI_CLUSTER_NAME}-nodes" --timeout=300s >/dev/null 2>&1 + k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'="$1" sts "${_TEST_CI_CLUSTER_NAME}-nodes" --timeout=300s >/dev/null 2>&1 got=$(k8s::kubectl get xc $_TEST_CI_CLUSTER_NAME -o=jsonpath='{.spec.size}') if [ "$got" -ne "$1" ]; then echo "failed scale cluster" @@ -81,13 +82,13 @@ function test::ci::_chaos() { kill=$((RANDOM % max_kill + 1)) log::info "chaos: kill=$kill" for ((j = 0; j < kill; j++)); do - pod="${_TEST_CI_CLUSTER_NAME}-$((RANDOM % size))" + pod="${_TEST_CI_CLUSTER_NAME}-nodes-$((RANDOM % size))" log::info "chaos: kill pod=$pod" k8s::kubectl delete pod "$pod" --force --grace-period=0 2>/dev/null done test::ci::_etcdctl_expect "$endpoints" "put B $i" "OK" || return $? test::ci::_etcdctl_expect "$endpoints" "get B" "B\n$i" || return $? - k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'="$size" sts/$_TEST_CI_CLUSTER_NAME --timeout=300s >/dev/null 2>&1 + k8s::kubectl wait --for=jsonpath='{.status.readyReplicas}'="$size" sts "${_TEST_CI_CLUSTER_NAME}-nodes" --timeout=300s >/dev/null 2>&1 log::info "wait for log synchronization" && sleep $_TEST_CI_LOG_SYNC_TIMEOUT done } diff --git a/tests/e2e/cases/manifests/cluster.yml b/tests/e2e/cases/manifests/cluster.yml index 0193a1e7..b1403eb8 100644 --- a/tests/e2e/cases/manifests/cluster.yml +++ b/tests/e2e/cases/manifests/cluster.yml @@ -1,11 +1,11 @@ -apiVersion: xlineoperator.xline.cloud/v1alpha +apiVersion: xlineoperator.xline.cloud/v1alpha1 kind: XlineCluster metadata: name: my-xline-cluster spec: size: 3 container: - image: "datenlord/xline:latest" + image: "ghcr.io/xline-kv/xline:latest" imagePullPolicy: IfNotPresent # we will try to load image into cluster first. 
name: "my-xline" ports: diff --git a/tests/e2e/cases/manifests/operators.yml b/tests/e2e/cases/manifests/operators.yml index 85992bd9..0e845757 100644 --- a/tests/e2e/cases/manifests/operators.yml +++ b/tests/e2e/cases/manifests/operators.yml @@ -17,5 +17,10 @@ spec: spec: containers: - name: xline-operator - image: datenlord/xline-operator:latest + image: xline-kv/xline-operator:latest + command: + - xline-operator + args: + - --auto-migration + - --create-crd imagePullPolicy: Never diff --git a/tests/e2e/testenv/testenv.sh b/tests/e2e/testenv/testenv.sh index 3f251031..db51783b 100644 --- a/tests/e2e/testenv/testenv.sh +++ b/tests/e2e/testenv/testenv.sh @@ -15,11 +15,11 @@ function testenv::k8s::delete() { function testenv::k8s::load_images() { # xline image - xline_image="${XLINE_IMAGE:-datenlord/xline:latest}" + xline_image="ghcr.io/xline-kv/xline:latest" docker pull "$xline_image" >/dev/null testenv::k8s::kind::load_image "$xline_image" # xline operator image, this needs to be built first - testenv::k8s::kind::load_image datenlord/xline-operator:latest + testenv::k8s::kind::load_image xline-kv/xline-operator:latest # etcdctl image docker pull gcr.io/etcd-development/etcd:v3.5.5 >/dev/null testenv::k8s::kind::load_image gcr.io/etcd-development/etcd:v3.5.5 From 6cd530aeb0d7d34e5adf29ba974e7fbbe9bf9185 Mon Sep 17 00:00:00 2001 From: iGxnon Date: Thu, 12 Oct 2023 22:00:59 +0800 Subject: [PATCH 11/11] doc: add kustomize manifest Signed-off-by: iGxnon --- .pre-commit-config.yaml | 1 - Cargo.lock | 189 +- manifest/crd/xline-operator.yml | 7677 +++++++++++++++++++++++ manifest/rbac/crd-migration.yml | 30 + manifest/rbac/kustomization.yml | 4 + manifest/rbac/namespaced-controller.yml | 46 + manifest/rbac/service-account.yaml | 5 + 7 files changed, 7887 insertions(+), 65 deletions(-) create mode 100644 manifest/crd/xline-operator.yml create mode 100644 manifest/rbac/crd-migration.yml create mode 100644 manifest/rbac/kustomization.yml create mode 100644 manifest/rbac/namespaced-controller.yml create mode 100644 manifest/rbac/service-account.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ea3a46ac..dd574d3f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,7 +8,6 @@ repos: - id: check-case-conflict - id: check-merge-conflict - id: check-symlinks - - id: check-yaml - id: check-toml - id: end-of-file-fixer - id: mixed-line-ending diff --git a/Cargo.lock b/Cargo.lock index 87dba6c1..0d207efd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -149,14 +149,14 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] name = "async-task" -version = "4.4.0" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" +checksum = "b9441c6b2fe128a7c2bf680a44c34d0df31ce09e5b7e401fcca3faa483dbc921" [[package]] name = "async-trait" @@ -166,7 +166,7 @@ checksum = "7b2d0f03b3640e3a630367e40c468cb7f309529c708ed1d88597047b0e7c6ef7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -306,13 +306,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.12", + "prettyplease 0.2.15", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -354,9 +354,9 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] 
name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -485,7 +485,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -526,9 +526,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -558,6 +558,31 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-skiplist" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "883a5821d7d079fcf34ac55f27a833ee61678110f6b97637cc74513c0d0b42fc" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", + "scopeguard", +] + [[package]] name = "crossbeam-utils" version = "0.8.16" @@ -580,7 +605,7 @@ dependencies = [ [[package]] name = "curp" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "async-stream", "async-trait", @@ -605,6 +630,7 @@ dependencies = [ "parking_lot", "prost", "prost-build", + "rand", "serde", "thiserror", "tokio-stream 0.1.12", @@ -612,18 +638,19 @@ dependencies = [ "tracing", "tracing-opentelemetry", "utils 0.1.0 (git+https://github.com/xline-kv/Xline.git)", - "uuid", ] [[package]] name = "curp-external-api" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "async-trait", "engine", "mockall", + "prost", "serde", + "thiserror", ] [[package]] @@ -671,7 +698,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -693,14 +720,14 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] name = "dashmap" -version = "5.5.1" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", "hashbrown 0.14.0", @@ -825,7 +852,7 @@ checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "engine" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "async-trait", "bincode", @@ -890,9 +917,9 @@ 
checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fixedbitset" @@ -1014,7 +1041,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -1067,7 +1094,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -1422,9 +1449,9 @@ checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -1822,6 +1849,15 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + [[package]] name = "merged_range" version = "0.1.0" @@ -1960,9 +1996,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] @@ -2021,7 +2057,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2263,7 +2299,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2304,7 +2340,7 @@ checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2373,12 +2409,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2424,6 +2460,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror", +] + [[package]] name = "prost" version = "0.11.9" @@ -2478,6 +2529,12 @@ dependencies = [ "prost", ] +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + 
[[package]] name = "quote" version = "1.0.29" @@ -2700,9 +2757,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.166" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -2719,13 +2776,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2807,7 +2864,7 @@ dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -2845,9 +2902,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook-registry" @@ -2935,9 +2992,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.23" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -2981,9 +3038,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -3017,7 +3074,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -3054,9 +3111,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.26" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a79d09ac6b08c1ab3906a2f7cc2e81a0e27c7ae89c63812df75e52bef0751e07" +checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -3067,15 +3124,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c65469ed6b3a4809d987a41eb1dc918e9bc1d92211cbad7ae82931846f7451" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -3132,7 +3189,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -3365,7 +3422,7 @@ checksum = 
"5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", ] [[package]] @@ -3559,11 +3616,12 @@ dependencies = [ [[package]] name = "utils" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "clippy-utilities 0.2.0", "derive_builder", "getset", + "madsim-tokio", "madsim-tonic", "opentelemetry", "parking_lot", @@ -3648,7 +3706,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -3670,7 +3728,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.32", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3820,7 +3878,7 @@ dependencies = [ [[package]] name = "xline" version = "0.4.1" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "anyhow", "async-stream", @@ -3828,12 +3886,14 @@ dependencies = [ "bytes", "clap 3.2.25", "clippy-utilities 0.1.0", + "crossbeam-skiplist", "curp", "engine", "event-listener", "flume", "futures", "getset", + "hyper", "itertools", "jsonwebtoken", "log", @@ -3868,7 +3928,7 @@ dependencies = [ [[package]] name = "xline-client" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ "async-stream", "clippy-utilities 0.1.0", @@ -3883,7 +3943,6 @@ dependencies = [ "thiserror", "tower", "utils 0.1.0 (git+https://github.com/xline-kv/Xline.git)", - "uuid", "xline", "xlineapi", ] @@ -3903,7 +3962,9 @@ dependencies = [ "garde", "k8s-openapi", "kube", + "lazy_static", "operator-api", + "prometheus", "schemars", "serde", "serde_json", @@ -3945,8 +4006,9 @@ dependencies = [ [[package]] name = "xlineapi" version = "0.1.0" -source = "git+https://github.com/xline-kv/Xline.git#fadc65670fb6cf5ca2540fe4a25c8f1b4df1b766" +source = "git+https://github.com/xline-kv/Xline.git#b9e4660debf4feb06458bd604473a509e299cfd7" dependencies = [ + "curp-external-api", "madsim-etcd-client", "madsim-tonic", "madsim-tonic-build", @@ -3962,11 +4024,10 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/manifest/crd/xline-operator.yml b/manifest/crd/xline-operator.yml new file mode 100644 index 00000000..e8103e37 --- /dev/null +++ b/manifest/crd/xline-operator.yml @@ -0,0 +1,7677 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: xlineclusters.xlineoperator.xline.cloud +spec: + conversion: + strategy: None + group: xlineoperator.xline.cloud + names: + kind: XlineCluster + listKind: XlineClusterList + plural: xlineclusters + shortNames: + - xc + singular: xlinecluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - 
description: The cluster size + jsonPath: .spec.size + name: Size + type: string + - description: The available amount + jsonPath: .status.available + name: Available + type: string + - description: The cron spec defining the interval a backup CronJob is run + jsonPath: .spec.backup.cron + name: Backup Cron + type: string + - description: The cluster age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Auto-generated derived type for ClusterSpec via `CustomResource` + properties: + spec: + description: Xline cluster specification + properties: + affinity: + description: The affinity of the xline node + nullable: true + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. 
If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDead. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. 
This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key <topologyKey> + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + backup: + anyOf: + - required: + - s3 + - required: + - pvc + description: Backup specification + nullable: true + properties: + cron: + description: Cron Spec + pattern: '^(?:\*|[0-5]?\d)(?:[-/,]?(?:\*|[0-5]?\d))*(?: +(?:\*|1?[0-9]|2[0-3])(?:[-/,]?(?:\*|1?[0-9]|2[0-3]))*){4}$' + type: string + pvc: + description: Persistent volume backup type + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST + resource this object represents. Servers may infer this + from the endpoint the client submits requests to. Cannot + be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. They + are not queryable and should be preserved when modifying + objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + creationTimestamp: + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object + to gracefully terminate before it will be removed from + the system. Only set when deletionTimestamp is also + set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: |- + DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + + Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted + from the registry. Each entry is an identifier for the + responsible component that will remove the entry from + the list. If the deletionTimestamp of the object is + non-nil, entries in this list can only be removed. 
Finalizers + may be processed and removed in any order. Order is + NOT enforced because it introduces significant risk + of stuck finalizers. finalizers is a shared field, any + actor with permission can reorder it. If the finalizer + list is processed in order, then this can lead to a + situation in which the component responsible for the + first finalizer in the list is waiting for a signal + (field value, external system, or other) produced by + a component responsible for a finalizer later in the + list, resulting in a deadlock. Without enforced ordering + finalizers are free to order amongst themselves and + are not vulnerable to ordering changes in the list. + items: + type: string + type: array + generateName: + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + + If this field is specified and the generated name exists, the server will return a 409. + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + type: string + generation: + description: A sequence number representing a specific + generation of the desired state. Populated by the system. + Read-only. + format: int64 + type: integer + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: ManagedFields maps workflow-id and version + to the set of fields that are managed by that workflow. + This is mostly for internal housekeeping, and users + typically shouldn't need to set or understand this field. + A workflow can be the user's name, a controller's name, + or the name of a specific apply path like "ci-cd". The + set of fields is always in the version that the workflow + used when modifying the object. + items: + description: ManagedFieldsEntry is a workflow-id, a + FieldSet and the group version of the resource that + the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this + resource that this field set applies to. The format + is "group/version" just like the top-level APIVersion + field. It is necessary to track the version of + a field set because it cannot be automatically + converted. + type: string + fieldsType: + description: 'FieldsType is the discriminator for + the different fields format and version. There + is currently only one possible value: "FieldsV1"' + type: string + fieldsV1: + description: FieldsV1 holds the first JSON version + format as described in the "FieldsV1" type. + type: object + manager: + description: Manager is an identifier of the workflow + managing these fields. + type: string + operation: + description: Operation is the type of operation + which lead to this ManagedFieldsEntry being created. + The only valid values for this field are 'Apply' + and 'Update'. 
+ type: string + subresource: + description: Subresource is the name of the subresource + used to update that object, or empty string if + the object was updated through the main resource. + The value of this field is used to distinguish + between managers, even if they share the same + name. For example, a status update will be distinct + from a regular update using the same manager name. + Note that the APIVersion field is not related + to the Subresource field and it always corresponds + to the version of the main resource. + type: string + time: + description: Time is the timestamp of when the ManagedFields + entry was added. The timestamp will also be updated + if a field is added, the manager changes any of + the owned fields value or removes a field. The + timestamp does not update when a field is removed + from the entry because another manager took it + over. + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. + Is required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be + updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: |- + Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + type: string + ownerReferences: + description: List of objects depended by this object. + If ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed + by a controller, then an entry in this list will point + to this controller, with the controller field set to + true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this + field and enforces the foreground deletion. Defaults + to false. To set this field, a user needs "delete" + permission of the owner, otherwise 422 (Unprocessable + Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: |- + An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + + Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + selfLink: + description: 'Deprecated: selfLink is a legacy read-only + field that is no longer populated by the system.' + type: string + uid: + description: |- + UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + + Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + type: string + type: object + spec: + description: 'spec defines the desired characteristics of + a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef contents + will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the status + field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and + AsInt64() accessors.\n\nThe serialization format + is:\n\n``` ::= \n\n\t(Note + that may be empty, from the \"\" case + in .)\n\n ::= 0 | + 1 | ... | 9 ::= | + ::= | . + | . | . 
::= + \"+\" | \"-\" ::= | + ::= + | | ::= + Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International + System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose + the capitalization.)\n\n ::= + \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than + 3 decimal places. Numbers larger or more precise + will be capped or rounded up. (E.g.: 0.1m will + rounded up to 1m.) This may be extended in the + future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This + means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this + exercise.\n\nNon-canonical values will still parse + as long as they are well formed, but will be re-emitted + in their canonical form. (So always use canonical + form, or don't diff.)\n\nThis format is intended + to make it difficult to use these numbers without + writing some sort of special handling code in + the hopes that that will cause implementors to + also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and + AsInt64() accessors.\n\nThe serialization format + is:\n\n``` ::= \n\n\t(Note + that may be empty, from the \"\" case + in .)\n\n ::= 0 | + 1 | ... | 9 ::= | + ::= | . + | . | . ::= + \"+\" | \"-\" ::= | + ::= + | | ::= + Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International + System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose + the capitalization.)\n\n ::= + \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than + 3 decimal places. Numbers larger or more precise + will be capped or rounded up. (E.g.: 0.1m will + rounded up to 1m.) This may be extended in the + future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". 
This + means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this + exercise.\n\nNon-canonical values will still parse + as long as they are well formed, but will be re-emitted + in their canonical form. (So always use canonical + form, or don't diff.)\n\nThis format is intended + to make it difficult to use these numbers without + writing some sort of special handling code in + the hopes that that will cause implementors to + also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'status represents the current information/status + of a persistent volume claim. Read-only.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: allocatedResources is the storage resource + within AllocatedResources tracks the capacity allocated + to a PVC. It may be larger than the actual capacity + when a volume expansion operation is requested. For + storage quota, the larger value from allocatedResources + and PVC.spec.resources is used. If allocatedResources + is not set, PVC.spec.resources alone is used for quota + calculation. If a volume expansion capacity request + is lowered, allocatedResources is only lowered if there + are no expansion operations in progress and if the actual + volume capacity is equal or lower than the requested + capacity. This is an alpha field and requires enabling + RecoverVolumeExpansionFailure feature. + type: object + capacity: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. 
It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: capacity represents the actual resources + of the underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains + details about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed + the condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the + condition transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message + indicating details about last transition. + type: string + reason: + description: reason is a unique, this should be + a short, machine understandable string that gives + the reason for condition's last transition. If + it reports "ResizeStarted" that means the underlying + persistent volume is being resized. + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + phase: + description: |+ + phase represents the current phase of PersistentVolumeClaim. + + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. 
+ ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: string + type: object + required: + - metadata + type: object + s3: + description: S3 backup type + properties: + bucket: + description: S3 bucket name to use for backup + pattern: ^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$ + type: string + required: + - bucket + type: object + required: + - cron + type: object + container: + description: Xline container specification + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels[''<KEY>'']`, + `metadata.annotations[''<KEY>'']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.'
+ type: string + imagePullPolicy: + description: |+ + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period countdown + begins before the PreStop hook is executed. 
Regardless of + the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period (unless + delayed by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. 
Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. 
spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying + a port here DOES NOT prevent that port from being exposed. Any + port which is listening on the default "0.0.0.0" address inside + a container will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: |+ + Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation of + a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= | . + | . | . ::= \"+\" | + \"-\" ::= | + ::= | | + ::= Ki | Mi | Gi | Ti | + Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no quantity + may represent a number greater than 2^63-1 in magnitude, + nor may it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: 0.1m + will rounded up to 1m.) This may be extended in the future + if we require larger or smaller quantities.\n\nWhen a + Quantity is parsed from a string, it will remember the + type of suffix it had, and will use the same type again + when it is serialized.\n\nBefore serializing, Quantity + will be put in \"canonical form\". This means that Exponent/suffix + will be adjusted up or down (with a corresponding increase + or decrease in Mantissa) such that:\n\n- No precision + is lost - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign will + be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER be + internally represented by a floating point number. That + is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So always + use canonical form, or don't diff.)\n\nThis format is + intended to make it difficult to use these numbers without + writing some sort of special handling code in the hopes + that that will cause implementors to also use a fixed + point implementation." + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation of + a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= | . + | . | . 
::= \"+\" | + \"-\" ::= | + ::= | | + ::= Ki | Mi | Gi | Ti | + Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no quantity + may represent a number greater than 2^63-1 in magnitude, + nor may it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: 0.1m + will rounded up to 1m.) This may be extended in the future + if we require larger or smaller quantities.\n\nWhen a + Quantity is parsed from a string, it will remember the + type of suffix it had, and will use the same type again + when it is serialized.\n\nBefore serializing, Quantity + will be put in \"canonical form\". This means that Exponent/suffix + will be adjusted up or down (with a corresponding increase + or decrease in Mantissa) such that:\n\n- No precision + is lost - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign will + be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER be + internally represented by a floating point number. That + is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So always + use canonical form, or don't diff.)\n\nThis format is + intended to make it difficult to use these numbers without + writing some sort of special handling code in the hopes + that that will cause implementors to also use a fixed + point implementation." + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More info: + https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. Note that this field cannot be set when + spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. 
Note that this field cannot + be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. Note that this field cannot be set when spec.os.name + is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. Note + that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: |+ + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: |+ + Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + data: + description: The data PVC, if it is not specified, then use emptyDir + instead + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not queryable + and should be preserved when modifying objects. More info: + http://kubernetes.io/docs/user-guide/annotations' + type: object + creationTimestamp: + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to + gracefully terminate before it will be removed from the + system. Only set when deletionTimestamp is also set. May + only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: |- + DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + + Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from + the registry. Each entry is an identifier for the responsible + component that will remove the entry from the list. If the + deletionTimestamp of the object is non-nil, entries in this + list can only be removed. Finalizers may be processed and + removed in any order. Order is NOT enforced because it + introduces significant risk of stuck finalizers. finalizers + is a shared field, any actor with permission can reorder + it. If the finalizer list is processed in order, then this + can lead to a situation in which the component responsible + for the first finalizer in the list is waiting for a signal + (field value, external system, or other) produced by a component + responsible for a finalizer later in the list, resulting + in a deadlock. Without enforced ordering finalizers are + free to order amongst themselves and are not vulnerable + to ordering changes in the list. + items: + type: string + type: array + generateName: + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + + If this field is specified and the generated name exists, the server will return a 409. + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + type: string + generation: + description: A sequence number representing a specific generation + of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: ManagedFields maps workflow-id and version to + the set of fields that are managed by that workflow. This + is mostly for internal housekeeping, and users typically + shouldn't need to set or understand this field. A workflow + can be the user's name, a controller's name, or the name + of a specific apply path like "ci-cd". 
The set of fields + is always in the version that the workflow used when modifying + the object. + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet + and the group version of the resource that the fieldset + applies to. + properties: + apiVersion: + description: APIVersion defines the version of this + resource that this field set applies to. The format + is "group/version" just like the top-level APIVersion + field. It is necessary to track the version of a field + set because it cannot be automatically converted. + type: string + fieldsType: + description: 'FieldsType is the discriminator for the + different fields format and version. There is currently + only one possible value: "FieldsV1"' + type: string + fieldsV1: + description: FieldsV1 holds the first JSON version format + as described in the "FieldsV1" type. + type: object + manager: + description: Manager is an identifier of the workflow + managing these fields. + type: string + operation: + description: Operation is the type of operation which + lead to this ManagedFieldsEntry being created. The + only valid values for this field are 'Apply' and 'Update'. + type: string + subresource: + description: Subresource is the name of the subresource + used to update that object, or empty string if the + object was updated through the main resource. The + value of this field is used to distinguish between + managers, even if they share the same name. For example, + a status update will be distinct from a regular update + using the same manager name. Note that the APIVersion + field is not related to the Subresource field and + it always corresponds to the version of the main resource. + type: string + time: + description: Time is the timestamp of when the ManagedFields + entry was added. The timestamp will also be updated + if a field is added, the manager changes any of the + owned fields value or removes a field. The timestamp + does not update when a field is removed from the entry + because another manager took it over. + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: |- + Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will + be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. See + https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this + field and enforces the foreground deletion. Defaults + to false. To set this field, a user needs "delete" + permission of the owner, otherwise 422 (Unprocessable + Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: |- + An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + + Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + selfLink: + description: 'Deprecated: selfLink is a legacy read-only field + that is no longer populated by the system.' + type: string + uid: + description: |- + UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + + Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + type: string + type: object + spec: + description: 'spec defines the desired characteristics of a volume + requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified data + source, it will create a new volume based on the contents + of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will be copied + to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not + be copied to dataSource.' 
+ properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being + referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object is + required in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. (Alpha) This field requires + the CrossNamespaceVolumeDataSource feature gate to be + enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the + volume should have. If RecoverVolumeExpansionFailure feature + is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher + than capacity recorded in the status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
+ + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . 
::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDead. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". 
The requirements are ANDed.
+ type: object
+ type: object
+ storageClassName:
+ description: 'storageClassName is the name of the StorageClass
+ required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ volumeMode:
+ description: volumeMode defines what type of volume is required
+ by the claim. Value of Filesystem is implied when not included
+ in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ status:
+ description: 'status represents the current information/status
+ of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+ properties:
+ accessModes:
+ description: 'accessModes contains the actual access modes
+ the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+ items:
+ type: string
+ type: array
+ allocatedResources:
+ additionalProperties:
+ description: "Quantity is a fixed-point representation of
+ a number. It provides convenient marshaling/unmarshaling
+ in JSON and YAML, in addition to String() and AsInt64()
+ accessors.\n\nThe serialization format is:\n\n```
+ \ ::= \n\n\t(Note that
+ may be empty, from the \"\" case in .)\n\n
+ \ ::= 0 | 1 | ... | 9 ::=
+ | ::= | .
+ | . | . ::= \"+\" |
+ \"-\" ::= |
+ ::= | |
+ ::= Ki | Mi | Gi | Ti |
+ Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n
+ \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note
+ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n
+ ::= \"e\" | \"E\" ```\n\nNo
+ matter which of the three exponent forms is used, no quantity
+ may represent a number greater than 2^63-1 in magnitude,
+ nor may it have more than 3 decimal places. Numbers larger
+ or more precise will be capped or rounded up. (E.g.: 0.1m
+ will rounded up to 1m.) This may be extended in the future
+ if we require larger or smaller quantities.\n\nWhen a
+ Quantity is parsed from a string, it will remember the
+ type of suffix it had, and will use the same type again
+ when it is serialized.\n\nBefore serializing, Quantity
+ will be put in \"canonical form\". This means that Exponent/suffix
+ will be adjusted up or down (with a corresponding increase
+ or decrease in Mantissa) such that:\n\n- No precision
+ is lost - No fractional digits will be emitted - The exponent
+ (or suffix) is as large as possible.\n\nThe sign will
+ be omitted unless the number is negative.\n\nExamples:\n\n-
+ 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized
+ as \"1536Mi\"\n\nNote that the quantity will NEVER be
+ internally represented by a floating point number. That
+ is the whole point of this exercise.\n\nNon-canonical
+ values will still parse as long as they are well formed,
+ but will be re-emitted in their canonical form. (So always
+ use canonical form, or don't diff.)\n\nThis format is
+ intended to make it difficult to use these numbers without
+ writing some sort of special handling code in the hopes
+ that that will cause implementors to also use a fixed
+ point implementation."
+ type: string
+ description: allocatedResources is the storage resource within
+ AllocatedResources tracks the capacity allocated to a PVC.
+ It may be larger than the actual capacity when a volume
+ expansion operation is requested.
For storage quota, the + larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object + capacity: + additionalProperties: + description: "Quantity is a fixed-point representation of + a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= | . + | . | . ::= \"+\" | + \"-\" ::= | + ::= | | + ::= Ki | Mi | Gi | Ti | + Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no quantity + may represent a number greater than 2^63-1 in magnitude, + nor may it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: 0.1m + will rounded up to 1m.) This may be extended in the future + if we require larger or smaller quantities.\n\nWhen a + Quantity is parsed from a string, it will remember the + type of suffix it had, and will use the same type again + when it is serialized.\n\nBefore serializing, Quantity + will be put in \"canonical form\". This means that Exponent/suffix + will be adjusted up or down (with a corresponding increase + or decrease in Mantissa) such that:\n\n- No precision + is lost - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign will + be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER be + internally represented by a floating point number. That + is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So always + use canonical form, or don't diff.)\n\nThis format is + intended to make it difficult to use these numbers without + writing some sort of special handling code in the hopes + that that will cause implementors to also use a fixed + point implementation." + type: string + description: capacity represents the actual resources of the + underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being resized + then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains details + about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message indicating + details about last transition. 
+ type: string + reason: + description: reason is a unique, this should be a short, + machine understandable string that gives the reason + for condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + phase: + description: |+ + phase represents the current phase of PersistentVolumeClaim. + + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion is + complete resizeStatus is set to empty string by resize controller + or kubelet. This is an alpha field and requires enabling + RecoverVolumeExpansionFailure feature. + type: string + type: object + required: + - metadata + type: object + pvcs: + description: Some user defined persistent volume claim templates + items: + description: PersistentVolumeClaim is a user's request for and claim + to a persistent volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + creationTimestamp: + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to + gracefully terminate before it will be removed from the + system. Only set when deletionTimestamp is also set. May + only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: |- + DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + + Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted + from the registry. Each entry is an identifier for the + responsible component that will remove the entry from + the list. If the deletionTimestamp of the object is non-nil, + entries in this list can only be removed. Finalizers may + be processed and removed in any order. Order is NOT enforced + because it introduces significant risk of stuck finalizers. + finalizers is a shared field, any actor with permission + can reorder it. If the finalizer list is processed in + order, then this can lead to a situation in which the + component responsible for the first finalizer in the list + is waiting for a signal (field value, external system, + or other) produced by a component responsible for a finalizer + later in the list, resulting in a deadlock. Without enforced + ordering finalizers are free to order amongst themselves + and are not vulnerable to ordering changes in the list. + items: + type: string + type: array + generateName: + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + + If this field is specified and the generated name exists, the server will return a 409. + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + type: string + generation: + description: A sequence number representing a specific generation + of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: ManagedFields maps workflow-id and version + to the set of fields that are managed by that workflow. + This is mostly for internal housekeeping, and users typically + shouldn't need to set or understand this field. A workflow + can be the user's name, a controller's name, or the name + of a specific apply path like "ci-cd". 
The set of fields + is always in the version that the workflow used when modifying + the object. + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet + and the group version of the resource that the fieldset + applies to. + properties: + apiVersion: + description: APIVersion defines the version of this + resource that this field set applies to. The format + is "group/version" just like the top-level APIVersion + field. It is necessary to track the version of a + field set because it cannot be automatically converted. + type: string + fieldsType: + description: 'FieldsType is the discriminator for + the different fields format and version. There is + currently only one possible value: "FieldsV1"' + type: string + fieldsV1: + description: FieldsV1 holds the first JSON version + format as described in the "FieldsV1" type. + type: object + manager: + description: Manager is an identifier of the workflow + managing these fields. + type: string + operation: + description: Operation is the type of operation which + lead to this ManagedFieldsEntry being created. The + only valid values for this field are 'Apply' and + 'Update'. + type: string + subresource: + description: Subresource is the name of the subresource + used to update that object, or empty string if the + object was updated through the main resource. The + value of this field is used to distinguish between + managers, even if they share the same name. For + example, a status update will be distinct from a + regular update using the same manager name. Note + that the APIVersion field is not related to the + Subresource field and it always corresponds to the + version of the main resource. + type: string + time: + description: Time is the timestamp of when the ManagedFields + entry was added. The timestamp will also be updated + if a field is added, the manager changes any of + the owned fields value or removes a field. The timestamp + does not update when a field is removed from the + entry because another manager took it over. + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: |- + Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this + field and enforces the foreground deletion. Defaults + to false. To set this field, a user needs "delete" + permission of the owner, otherwise 422 (Unprocessable + Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: |- + An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + + Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + selfLink: + description: 'Deprecated: selfLink is a legacy read-only + field that is no longer populated by the system.' + type: string + uid: + description: |- + UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + + Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + type: string + type: object + spec: + description: 'spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified data + source, it will create a new volume based on the contents + of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will be copied + to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will + not be copied to dataSource.' 
+ properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but must + still be higher than capacity recorded in the status field + of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
+ + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where + this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi + | Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than 3 + decimal places. Numbers larger or more precise will + be capped or rounded up. (E.g.: 0.1m will rounded + up to 1m.) This may be extended in the future if + we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This + means that Exponent/suffix will be adjusted up or + down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well + formed, but will be re-emitted in their canonical + form. (So always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . 
::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi + | Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than 3 + decimal places. Numbers larger or more precise will + be capped or rounded up. (E.g.: 0.1m will rounded + up to 1m.) This may be extended in the future if + we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This + means that Exponent/suffix will be adjusted up or + down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well + formed, but will be re-emitted in their canonical + form. (So always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDead. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". 
The requirements are ANDead. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= + | . | . | . ::= + \"+\" | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | Ti + | Pi | Ei\n\n\t(International System of units; See: + http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no + quantity may represent a number greater than 2^63-1 + in magnitude, nor may it have more than 3 decimal places. + Numbers larger or more precise will be capped or rounded + up. (E.g.: 0.1m will rounded up to 1m.) This may be + extended in the future if we require larger or smaller + quantities.\n\nWhen a Quantity is parsed from a string, + it will remember the type of suffix it had, and will + use the same type again when it is serialized.\n\nBefore + serializing, Quantity will be put in \"canonical form\". + This means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease in + Mantissa) such that:\n\n- No precision is lost - No + fractional digits will be emitted - The exponent (or + suffix) is as large as possible.\n\nThe sign will be + omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be + serialized as \"1536Mi\"\n\nNote that the quantity will + NEVER be internally represented by a floating point + number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis format + is intended to make it difficult to use these numbers + without writing some sort of special handling code in + the hopes that that will cause implementors to also + use a fixed point implementation." + type: string + description: allocatedResources is the storage resource + within AllocatedResources tracks the capacity allocated + to a PVC. It may be larger than the actual capacity when + a volume expansion operation is requested. 
For storage + quota, the larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object + capacity: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= + | . | . | . ::= + \"+\" | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | Ti + | Pi | Ei\n\n\t(International System of units; See: + http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no + quantity may represent a number greater than 2^63-1 + in magnitude, nor may it have more than 3 decimal places. + Numbers larger or more precise will be capped or rounded + up. (E.g.: 0.1m will rounded up to 1m.) This may be + extended in the future if we require larger or smaller + quantities.\n\nWhen a Quantity is parsed from a string, + it will remember the type of suffix it had, and will + use the same type again when it is serialized.\n\nBefore + serializing, Quantity will be put in \"canonical form\". + This means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease in + Mantissa) such that:\n\n- No precision is lost - No + fractional digits will be emitted - The exponent (or + suffix) is as large as possible.\n\nThe sign will be + omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be + serialized as \"1536Mi\"\n\nNote that the quantity will + NEVER be internally represented by a floating point + number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis format + is intended to make it difficult to use these numbers + without writing some sort of special handling code in + the hopes that that will cause implementors to also + use a fixed point implementation." + type: string + description: capacity represents the actual resources of + the underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains details + about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message + indicating details about last transition. 
+ type: string + reason: + description: reason is a unique, this should be a + short, machine understandable string that gives + the reason for condition's last transition. If it + reports "ResizeStarted" that means the underlying + persistent volume is being resized. + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + phase: + description: |+ + phase represents the current phase of PersistentVolumeClaim. + + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: string + type: object + required: + - metadata + type: object + nullable: true + type: array + size: + description: Size of the xline cluster, less than 3 is not allowed + format: int32 + minimum: 3 + type: integer + required: + - container + - size + type: object + status: + description: Xline cluster status + nullable: true + properties: + available: + description: The available nodes' number in the cluster + format: int32 + type: integer + required: + - available + type: object + required: + - spec + title: Cluster + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.size + statusReplicasPath: .status.available + - additionalPrinterColumns: + - description: The cluster size + jsonPath: .spec.size + name: Size + type: string + - description: The cluster age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha + schema: + openAPIV3Schema: + description: Auto-generated derived type for ClusterSpec via `CustomResource` + properties: + spec: + description: Xline cluster specification + properties: + affinity: + description: The affinity of the xline node + nullable: true + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDead. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |+ + Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. 
The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDead. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDead. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDead. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + container: + description: Xline container specification + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
+ `metadata.annotations[''<KEY>'']`, spec.nodeName,
+ spec.serviceAccountName, status.hostIP, status.podIP,
+ status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container: only
+ resources limits and requests (limits.cpu, limits.memory,
+ limits.ephemeral-storage, requests.cpu, requests.memory
+ and requests.ephemeral-storage) are currently supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ description: Specifies the output format of the
+ exposed resources, defaults to "1"
+ type: string
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
+ namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: List of sources to populate environment variables
+ in the container. The keys defined within a source must be a
+ C_IDENTIFIER. All invalid keys will be reported as an event
+ when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take
+ precedence. Values defined by an Env with a duplicate key will
+ take precedence. Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of a set of
+ ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ prefix:
+ description: An optional identifier to prepend to each key
+ in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ type: object
+ type: array
+ image:
+ description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management
+ to default or override container images in workload controllers
+ like Deployments and StatefulSets.'
+ type: string + imagePullPolicy: + description: |+ + Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period countdown + begins before the PreStop hook is executed. 
Regardless of + the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period (unless + delayed by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. 
Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. 
spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying + a port here DOES NOT prevent that port from being exposed. Any + port which is listening on the default "0.0.0.0" address inside + a container will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + description: |+ + Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
+
+ This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in
+ pod.spec.resourceClaims of the Pod where this field
+ is used. It makes that resource available inside a
+ container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ limits:
+ additionalProperties:
+ description: "Quantity is a fixed-point representation of
+ a number. It provides convenient marshaling/unmarshaling
+ in JSON and YAML, in addition to String() and AsInt64()
+ accessors.\n\nThe serialization format is:\n\n```
+ <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix>
+ may be empty, from the \"\" case in <decimalSI>.)\n\n
+ <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits>
+ <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+ <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number>
+ <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
+ <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n
+ <decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note
+ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n
+ <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo
+ matter which of the three exponent forms is used, no quantity
+ may represent a number greater than 2^63-1 in magnitude,
+ nor may it have more than 3 decimal places. Numbers larger
+ or more precise will be capped or rounded up. (E.g.: 0.1m
+ will rounded up to 1m.) This may be extended in the future
+ if we require larger or smaller quantities.\n\nWhen a
+ Quantity is parsed from a string, it will remember the
+ type of suffix it had, and will use the same type again
+ when it is serialized.\n\nBefore serializing, Quantity
+ will be put in \"canonical form\". This means that Exponent/suffix
+ will be adjusted up or down (with a corresponding increase
+ or decrease in Mantissa) such that:\n\n- No precision
+ is lost - No fractional digits will be emitted - The exponent
+ (or suffix) is as large as possible.\n\nThe sign will
+ be omitted unless the number is negative.\n\nExamples:\n\n-
+ 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized
+ as \"1536Mi\"\n\nNote that the quantity will NEVER be
+ internally represented by a floating point number. That
+ is the whole point of this exercise.\n\nNon-canonical
+ values will still parse as long as they are well formed,
+ but will be re-emitted in their canonical form. (So always
+ use canonical form, or don't diff.)\n\nThis format is
+ intended to make it difficult to use these numbers without
+ writing some sort of special handling code in the hopes
+ that that will cause implementors to also use a fixed
+ point implementation."
+ type: string
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ description: "Quantity is a fixed-point representation of
+ a number. It provides convenient marshaling/unmarshaling
+ in JSON and YAML, in addition to String() and AsInt64()
+ accessors.\n\nThe serialization format is:\n\n```
+ <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix>
+ may be empty, from the \"\" case in <decimalSI>.)\n\n
+ <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits>
+ <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+ <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number>
+ <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
+ <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n
+ <decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note
+ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n
+ <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo
+ matter which of the three exponent forms is used, no quantity
+ may represent a number greater than 2^63-1 in magnitude,
+ nor may it have more than 3 decimal places. Numbers larger
+ or more precise will be capped or rounded up. (E.g.: 0.1m
+ will rounded up to 1m.) This may be extended in the future
+ if we require larger or smaller quantities.\n\nWhen a
+ Quantity is parsed from a string, it will remember the
+ type of suffix it had, and will use the same type again
+ when it is serialized.\n\nBefore serializing, Quantity
+ will be put in \"canonical form\". This means that Exponent/suffix
+ will be adjusted up or down (with a corresponding increase
+ or decrease in Mantissa) such that:\n\n- No precision
+ is lost - No fractional digits will be emitted - The exponent
+ (or suffix) is as large as possible.\n\nThe sign will
+ be omitted unless the number is negative.\n\nExamples:\n\n-
+ 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized
+ as \"1536Mi\"\n\nNote that the quantity will NEVER be
+ internally represented by a floating point number. That
+ is the whole point of this exercise.\n\nNon-canonical
+ values will still parse as long as they are well formed,
+ but will be re-emitted in their canonical form. (So always
+ use canonical form, or don't diff.)\n\nThis format is
+ intended to make it difficult to use these numbers without
+ writing some sort of special handling code in the hopes
+ that that will cause implementors to also use a fixed
+ point implementation."
+ type: string
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ securityContext:
+ description: 'SecurityContext defines the security options the
+ container should be run with. If set, the fields of SecurityContext
+ override the equivalent fields of PodSecurityContext. More info:
+ https://kubernetes.io/docs/tasks/configure-pod-container/security-context/'
+ properties:
+ allowPrivilegeEscalation:
+ description: 'AllowPrivilegeEscalation controls whether a
+ process can gain more privileges than its parent process.
+ This bool directly controls if the no_new_privs flag will
+ be set on the container process. AllowPrivilegeEscalation
+ is true always when the container is: 1) run as Privileged
+ 2) has CAP_SYS_ADMIN Note that this field cannot be set
+ when spec.os.name is windows.'
+ type: boolean
+ capabilities:
+ description: The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the
+ container runtime. Note that this field cannot be set when
+ spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ type: string
+ type: array
+ type: object
+ privileged:
+ description: Run container in privileged mode. Processes in
+ privileged containers are essentially equivalent to root
+ on the host. Defaults to false. 
Note that this field cannot + be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. Note that this field cannot be set when spec.os.name + is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. Note + that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: |+ + type indicates which kind of seccomp profile will be applied. Valid options are: + + Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + scheme: + description: |+ + Scheme to use for connecting to the host. Defaults to HTTP. + + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + format: int-or-string + type: string + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: |+ + Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + data: + description: The data PVC, if it is not specified, then use emptyDir + instead + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not queryable + and should be preserved when modifying objects. More info: + http://kubernetes.io/docs/user-guide/annotations' + type: object + creationTimestamp: + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to + gracefully terminate before it will be removed from the + system. Only set when deletionTimestamp is also set. May + only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: |- + DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + + Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from + the registry. Each entry is an identifier for the responsible + component that will remove the entry from the list. If the + deletionTimestamp of the object is non-nil, entries in this + list can only be removed. Finalizers may be processed and + removed in any order. Order is NOT enforced because it + introduces significant risk of stuck finalizers. finalizers + is a shared field, any actor with permission can reorder + it. If the finalizer list is processed in order, then this + can lead to a situation in which the component responsible + for the first finalizer in the list is waiting for a signal + (field value, external system, or other) produced by a component + responsible for a finalizer later in the list, resulting + in a deadlock. Without enforced ordering finalizers are + free to order amongst themselves and are not vulnerable + to ordering changes in the list. + items: + type: string + type: array + generateName: + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + + If this field is specified and the generated name exists, the server will return a 409. + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + type: string + generation: + description: A sequence number representing a specific generation + of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: ManagedFields maps workflow-id and version to + the set of fields that are managed by that workflow. This + is mostly for internal housekeeping, and users typically + shouldn't need to set or understand this field. A workflow + can be the user's name, a controller's name, or the name + of a specific apply path like "ci-cd". 
The set of fields + is always in the version that the workflow used when modifying + the object. + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet + and the group version of the resource that the fieldset + applies to. + properties: + apiVersion: + description: APIVersion defines the version of this + resource that this field set applies to. The format + is "group/version" just like the top-level APIVersion + field. It is necessary to track the version of a field + set because it cannot be automatically converted. + type: string + fieldsType: + description: 'FieldsType is the discriminator for the + different fields format and version. There is currently + only one possible value: "FieldsV1"' + type: string + fieldsV1: + description: FieldsV1 holds the first JSON version format + as described in the "FieldsV1" type. + type: object + manager: + description: Manager is an identifier of the workflow + managing these fields. + type: string + operation: + description: Operation is the type of operation which + lead to this ManagedFieldsEntry being created. The + only valid values for this field are 'Apply' and 'Update'. + type: string + subresource: + description: Subresource is the name of the subresource + used to update that object, or empty string if the + object was updated through the main resource. The + value of this field is used to distinguish between + managers, even if they share the same name. For example, + a status update will be distinct from a regular update + using the same manager name. Note that the APIVersion + field is not related to the Subresource field and + it always corresponds to the version of the main resource. + type: string + time: + description: Time is the timestamp of when the ManagedFields + entry was added. The timestamp will also be updated + if a field is added, the manager changes any of the + owned fields value or removes a field. The timestamp + does not update when a field is removed from the entry + because another manager took it over. + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: |- + Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will + be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. See + https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this + field and enforces the foreground deletion. Defaults + to false. To set this field, a user needs "delete" + permission of the owner, otherwise 422 (Unprocessable + Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: |- + An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + + Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + selfLink: + description: 'Deprecated: selfLink is a legacy read-only field + that is no longer populated by the system.' + type: string + uid: + description: |- + UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + + Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + type: string + type: object + spec: + description: 'spec defines the desired characteristics of a volume + requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified data + source, it will create a new volume based on the contents + of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will be copied + to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not + be copied to dataSource.' 
+ properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being + referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object is + required in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. (Alpha) This field requires + the CrossNamespaceVolumeDataSource feature gate to be + enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the + volume should have. If RecoverVolumeExpansionFailure feature + is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher + than capacity recorded in the status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
+ + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . 
::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | + Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo matter which of the + three exponent forms is used, no quantity may represent + a number greater than 2^63-1 in magnitude, nor may + it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: + 0.1m will rounded up to 1m.) This may be extended + in the future if we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same type + again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This means + that Exponent/suffix will be adjusted up or down (with + a corresponding increase or decrease in Mantissa) + such that:\n\n- No precision is lost - No fractional + digits will be emitted - The exponent (or suffix) + is as large as possible.\n\nThe sign will be omitted + unless the number is negative.\n\nExamples:\n\n- 1.5 + will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER + be internally represented by a floating point number. + That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDead. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". 
The requirements are ANDead. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + status: + description: 'status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + description: "Quantity is a fixed-point representation of + a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= | . + | . | . ::= \"+\" | + \"-\" ::= | + ::= | | + ::= Ki | Mi | Gi | Ti | + Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no quantity + may represent a number greater than 2^63-1 in magnitude, + nor may it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: 0.1m + will rounded up to 1m.) This may be extended in the future + if we require larger or smaller quantities.\n\nWhen a + Quantity is parsed from a string, it will remember the + type of suffix it had, and will use the same type again + when it is serialized.\n\nBefore serializing, Quantity + will be put in \"canonical form\". This means that Exponent/suffix + will be adjusted up or down (with a corresponding increase + or decrease in Mantissa) such that:\n\n- No precision + is lost - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign will + be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER be + internally represented by a floating point number. That + is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So always + use canonical form, or don't diff.)\n\nThis format is + intended to make it difficult to use these numbers without + writing some sort of special handling code in the hopes + that that will cause implementors to also use a fixed + point implementation." + type: string + description: allocatedResources is the storage resource within + AllocatedResources tracks the capacity allocated to a PVC. + It may be larger than the actual capacity when a volume + expansion operation is requested. 
For storage quota, the + larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object + capacity: + additionalProperties: + description: "Quantity is a fixed-point representation of + a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= | . + | . | . ::= \"+\" | + \"-\" ::= | + ::= | | + ::= Ki | Mi | Gi | Ti | + Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no quantity + may represent a number greater than 2^63-1 in magnitude, + nor may it have more than 3 decimal places. Numbers larger + or more precise will be capped or rounded up. (E.g.: 0.1m + will rounded up to 1m.) This may be extended in the future + if we require larger or smaller quantities.\n\nWhen a + Quantity is parsed from a string, it will remember the + type of suffix it had, and will use the same type again + when it is serialized.\n\nBefore serializing, Quantity + will be put in \"canonical form\". This means that Exponent/suffix + will be adjusted up or down (with a corresponding increase + or decrease in Mantissa) such that:\n\n- No precision + is lost - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign will + be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized + as \"1536Mi\"\n\nNote that the quantity will NEVER be + internally represented by a floating point number. That + is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So always + use canonical form, or don't diff.)\n\nThis format is + intended to make it difficult to use these numbers without + writing some sort of special handling code in the hopes + that that will cause implementors to also use a fixed + point implementation." + type: string + description: capacity represents the actual resources of the + underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being resized + then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains details + about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message indicating + details about last transition. 
+ type: string + reason: + description: reason is a unique, this should be a short, + machine understandable string that gives the reason + for condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is being + resized. + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + phase: + description: |+ + phase represents the current phase of PersistentVolumeClaim. + + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion is + complete resizeStatus is set to empty string by resize controller + or kubelet. This is an alpha field and requires enabling + RecoverVolumeExpansionFailure feature. + type: string + type: object + required: + - metadata + type: object + pvcs: + description: Some user defined persistent volume claim templates + items: + description: PersistentVolumeClaim is a user's request for and claim + to a persistent volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + creationTimestamp: + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to + gracefully terminate before it will be removed from the + system. Only set when deletionTimestamp is also set. May + only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: |- + DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. 
Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. + + Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted + from the registry. Each entry is an identifier for the + responsible component that will remove the entry from + the list. If the deletionTimestamp of the object is non-nil, + entries in this list can only be removed. Finalizers may + be processed and removed in any order. Order is NOT enforced + because it introduces significant risk of stuck finalizers. + finalizers is a shared field, any actor with permission + can reorder it. If the finalizer list is processed in + order, then this can lead to a situation in which the + component responsible for the first finalizer in the list + is waiting for a signal (field value, external system, + or other) produced by a component responsible for a finalizer + later in the list, resulting in a deadlock. Without enforced + ordering finalizers are free to order amongst themselves + and are not vulnerable to ordering changes in the list. + items: + type: string + type: array + generateName: + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + + If this field is specified and the generated name exists, the server will return a 409. + + Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + type: string + generation: + description: A sequence number representing a specific generation + of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: ManagedFields maps workflow-id and version + to the set of fields that are managed by that workflow. + This is mostly for internal housekeeping, and users typically + shouldn't need to set or understand this field. A workflow + can be the user's name, a controller's name, or the name + of a specific apply path like "ci-cd". 
The set of fields + is always in the version that the workflow used when modifying + the object. + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet + and the group version of the resource that the fieldset + applies to. + properties: + apiVersion: + description: APIVersion defines the version of this + resource that this field set applies to. The format + is "group/version" just like the top-level APIVersion + field. It is necessary to track the version of a + field set because it cannot be automatically converted. + type: string + fieldsType: + description: 'FieldsType is the discriminator for + the different fields format and version. There is + currently only one possible value: "FieldsV1"' + type: string + fieldsV1: + description: FieldsV1 holds the first JSON version + format as described in the "FieldsV1" type. + type: object + manager: + description: Manager is an identifier of the workflow + managing these fields. + type: string + operation: + description: Operation is the type of operation which + lead to this ManagedFieldsEntry being created. The + only valid values for this field are 'Apply' and + 'Update'. + type: string + subresource: + description: Subresource is the name of the subresource + used to update that object, or empty string if the + object was updated through the main resource. The + value of this field is used to distinguish between + managers, even if they share the same name. For + example, a status update will be distinct from a + regular update using the same manager name. Note + that the APIVersion field is not related to the + Subresource field and it always corresponds to the + version of the main resource. + type: string + time: + description: Time is the timestamp of when the ManagedFields + entry was added. The timestamp will also be updated + if a field is added, the manager changes any of + the owned fields value or removes a field. The timestamp + does not update when a field is removed from the + entry because another manager took it over. + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: |- + Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this + field and enforces the foreground deletion. Defaults + to false. To set this field, a user needs "delete" + permission of the owner, otherwise 422 (Unprocessable + Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: |- + An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. + + Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + selfLink: + description: 'Deprecated: selfLink is a legacy read-only + field that is no longer populated by the system.' + type: string + uid: + description: |- + UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. + + Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + type: string + type: object + spec: + description: 'spec defines the desired characteristics of a + volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access modes + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified data + source, it will create a new volume based on the contents + of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will be copied + to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will + not be copied to dataSource.' 
+ properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, the + specified Kind must be in the core API group. For + any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but must + still be higher than capacity recorded in the status field + of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
+ + This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where + this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . ::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi + | Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than 3 + decimal places. Numbers larger or more precise will + be capped or rounded up. (E.g.: 0.1m will rounded + up to 1m.) This may be extended in the future if + we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This + means that Exponent/suffix will be adjusted up or + down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well + formed, but will be re-emitted in their canonical + form. (So always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount of + compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + ::= \n\n\t(Note + that may be empty, from the \"\" case in + .)\n\n ::= 0 | 1 | ... + | 9 ::= | + ::= | . + | . | . 
::= \"+\" + | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi + | Ti | Pi | Ei\n\n\t(International System of units; + See: http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the + capitalization.)\n\n ::= \"e\" + | \"E\" ```\n\nNo + matter which of the three exponent forms is used, + no quantity may represent a number greater than + 2^63-1 in magnitude, nor may it have more than 3 + decimal places. Numbers larger or more precise will + be capped or rounded up. (E.g.: 0.1m will rounded + up to 1m.) This may be extended in the future if + we require larger or smaller quantities.\n\nWhen + a Quantity is parsed from a string, it will remember + the type of suffix it had, and will use the same + type again when it is serialized.\n\nBefore serializing, + Quantity will be put in \"canonical form\". This + means that Exponent/suffix will be adjusted up or + down (with a corresponding increase or decrease + in Mantissa) such that:\n\n- No precision is lost + - No fractional digits will be emitted - The exponent + (or suffix) is as large as possible.\n\nThe sign + will be omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will + be serialized as \"1536Mi\"\n\nNote that the quantity + will NEVER be internally represented by a floating + point number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well + formed, but will be re-emitted in their canonical + form. (So always use canonical form, or don't diff.)\n\nThis + format is intended to make it difficult to use these + numbers without writing some sort of special handling + code in the hopes that that will cause implementors + to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDead. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". 
The requirements are ANDead. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. Value of Filesystem is implied when not + included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= + | . | . | . ::= + \"+\" | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | Ti + | Pi | Ei\n\n\t(International System of units; See: + http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no + quantity may represent a number greater than 2^63-1 + in magnitude, nor may it have more than 3 decimal places. + Numbers larger or more precise will be capped or rounded + up. (E.g.: 0.1m will rounded up to 1m.) This may be + extended in the future if we require larger or smaller + quantities.\n\nWhen a Quantity is parsed from a string, + it will remember the type of suffix it had, and will + use the same type again when it is serialized.\n\nBefore + serializing, Quantity will be put in \"canonical form\". + This means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease in + Mantissa) such that:\n\n- No precision is lost - No + fractional digits will be emitted - The exponent (or + suffix) is as large as possible.\n\nThe sign will be + omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be + serialized as \"1536Mi\"\n\nNote that the quantity will + NEVER be internally represented by a floating point + number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis format + is intended to make it difficult to use these numbers + without writing some sort of special handling code in + the hopes that that will cause implementors to also + use a fixed point implementation." + type: string + description: allocatedResources is the storage resource + within AllocatedResources tracks the capacity allocated + to a PVC. It may be larger than the actual capacity when + a volume expansion operation is requested. 
For storage + quota, the larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object + capacity: + additionalProperties: + description: "Quantity is a fixed-point representation + of a number. It provides convenient marshaling/unmarshaling + in JSON and YAML, in addition to String() and AsInt64() + accessors.\n\nThe serialization format is:\n\n``` + \ ::= \n\n\t(Note that + may be empty, from the \"\" case in .)\n\n + \ ::= 0 | 1 | ... | 9 ::= + | ::= + | . | . | . ::= + \"+\" | \"-\" ::= | + ::= | + | ::= Ki | Mi | Gi | Ti + | Pi | Ei\n\n\t(International System of units; See: + http://physics.nist.gov/cuu/Units/binary.html)\n\n + \ ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note + that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n + ::= \"e\" | \"E\" ```\n\nNo + matter which of the three exponent forms is used, no + quantity may represent a number greater than 2^63-1 + in magnitude, nor may it have more than 3 decimal places. + Numbers larger or more precise will be capped or rounded + up. (E.g.: 0.1m will rounded up to 1m.) This may be + extended in the future if we require larger or smaller + quantities.\n\nWhen a Quantity is parsed from a string, + it will remember the type of suffix it had, and will + use the same type again when it is serialized.\n\nBefore + serializing, Quantity will be put in \"canonical form\". + This means that Exponent/suffix will be adjusted up + or down (with a corresponding increase or decrease in + Mantissa) such that:\n\n- No precision is lost - No + fractional digits will be emitted - The exponent (or + suffix) is as large as possible.\n\nThe sign will be + omitted unless the number is negative.\n\nExamples:\n\n- + 1.5 will be serialized as \"1500m\" - 1.5Gi will be + serialized as \"1536Mi\"\n\nNote that the quantity will + NEVER be internally represented by a floating point + number. That is the whole point of this exercise.\n\nNon-canonical + values will still parse as long as they are well formed, + but will be re-emitted in their canonical form. (So + always use canonical form, or don't diff.)\n\nThis format + is intended to make it difficult to use these numbers + without writing some sort of special handling code in + the hopes that that will cause implementors to also + use a fixed point implementation." + type: string + description: capacity represents the actual resources of + the underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains details + about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed the + condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message + indicating details about last transition. 
+ type: string + reason: + description: reason is a unique, this should be a + short, machine understandable string that gives + the reason for condition's last transition. If it + reports "ResizeStarted" that means the underlying + persistent volume is being resized. + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + phase: + description: |+ + phase represents the current phase of PersistentVolumeClaim. + + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: string + type: object + required: + - metadata + type: object + nullable: true + type: array + size: + description: Size of the xline cluster, less than 3 is not allowed + format: int32 + minimum: 3 + type: integer + required: + - container + - size + type: object + status: + description: Xline cluster status + nullable: true + properties: + available: + description: The available nodes' number in the cluster + format: int32 + type: integer + required: + - available + type: object + required: + - spec + title: Cluster + type: object + served: true + storage: false + subresources: + scale: + specReplicasPath: .spec.size + statusReplicasPath: .status.available diff --git a/manifest/rbac/crd-migration.yml b/manifest/rbac/crd-migration.yml new file mode 100644 index 00000000..a0e8571c --- /dev/null +++ b/manifest/rbac/crd-migration.yml @@ -0,0 +1,30 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: crd-migration-role +rules: + - apiGroups: + - xlineoperator.xline.cloud + resources: + - xlineclusters + verbs: + - "*" + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: crd-migration-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: crd-migration-role +subjects: + - kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/manifest/rbac/kustomization.yml b/manifest/rbac/kustomization.yml new file mode 100644 index 00000000..270040da --- /dev/null +++ b/manifest/rbac/kustomization.yml @@ -0,0 +1,4 @@ +resources: + - service-account.yaml + - namespaced-controller.yml + - crd-migration.yml diff --git a/manifest/rbac/namespaced-controller.yml b/manifest/rbac/namespaced-controller.yml new file mode 100644 index 00000000..a4798c85 --- /dev/null +++ b/manifest/rbac/namespaced-controller.yml @@ -0,0 +1,46 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: namespaced-controller-role +rules: + - apiGroups: + - xlineoperator.xline.cloud + resources: + - xlineclusters + verbs: + - "*" + - apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + verbs: + - "*" + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - "*" + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: namespaced-controller-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: namespaced-controller-role +subjects: + - kind: ServiceAccount + name: controller-manager + namespace: system diff --git 
a/manifest/rbac/service-account.yaml b/manifest/rbac/service-account.yaml
new file mode 100644
index 00000000..7cd6025b
--- /dev/null
+++ b/manifest/rbac/service-account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: controller-manager
+  namespace: system
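
For reference, below is a minimal sketch of a custom resource that the generated schema above would admit. The xlineoperator.xline.cloud group and the size/pvcs fields come from this patch; the kind, served version, container image, and StorageClass name are assumptions for illustration only.

# Hypothetical resource; kind, apiVersion, image and StorageClass are assumed.
apiVersion: xlineoperator.xline.cloud/v1alpha1     # assumed served version
kind: XlineCluster                                 # assumed kind for the xlineclusters plural
metadata:
  name: my-xline
  namespace: default
spec:
  size: 3                              # schema minimum; values below 3 are rejected
  container:                           # assuming a standard core/v1 Container spec
    name: xline
    image: ghcr.io/xline-kv/xline:latest   # hypothetical image reference
  pvcs:                                # optional user-defined PVC templates (nullable)
    - metadata:
        name: xline-data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: standard     # hypothetical StorageClass name
        resources:
          requests:
            storage: 10Gi              # Quantity in canonical form, e.g. 10Gi or 1500m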
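
The new manifest/rbac kustomization wires these pieces together: service-account.yaml creates the controller-manager ServiceAccount, namespaced-controller.yml grants the controller its namespaced Role over pods, services, endpoints, persistentvolumeclaims, events, statefulsets, cronjobs and xlineclusters, and crd-migration.yml grants cluster-scoped access to customresourcedefinitions and xlineclusters for CRD migration. As a sketch, and assuming the system namespace referenced by the bindings already exists, the whole set can be applied with: kubectl apply -k manifest/rbac -n system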