From 8fcf857c5cd7d900cd06312b46d542fb9201ee5c Mon Sep 17 00:00:00 2001 From: CloudNativePG Automated Updates Date: Thu, 3 Oct 2024 09:00:46 +0000 Subject: [PATCH] chore: sync API --- go.mod | 33 + go.sum | 117 + pkg/api/v1/backup_types.go | 340 +++ pkg/api/v1/base_types.go | 50 + pkg/api/v1/cluster_types.go | 2272 +++++++++++++++++++ pkg/api/v1/clusterimagecatalog_types.go | 50 + pkg/api/v1/common_types.go | 43 + pkg/api/v1/database_types.go | 132 ++ pkg/api/v1/groupversion_info.go | 65 + pkg/api/v1/imagecatalog_types.go | 69 + pkg/api/v1/pooler_types.go | 269 +++ pkg/api/v1/scheduledbackup_types.go | 135 ++ pkg/api/v1/zz_api_repo_funcs_to_copy.go | 27 + pkg/api/v1/zz_generated.deepcopy.go | 2773 +++++++++++++++++++++++ 14 files changed, 6375 insertions(+) create mode 100644 go.mod create mode 100644 go.sum create mode 100644 pkg/api/v1/backup_types.go create mode 100644 pkg/api/v1/base_types.go create mode 100644 pkg/api/v1/cluster_types.go create mode 100644 pkg/api/v1/clusterimagecatalog_types.go create mode 100644 pkg/api/v1/common_types.go create mode 100644 pkg/api/v1/database_types.go create mode 100644 pkg/api/v1/groupversion_info.go create mode 100644 pkg/api/v1/imagecatalog_types.go create mode 100644 pkg/api/v1/pooler_types.go create mode 100644 pkg/api/v1/scheduledbackup_types.go create mode 100644 pkg/api/v1/zz_api_repo_funcs_to_copy.go create mode 100644 pkg/api/v1/zz_generated.deepcopy.go diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a43d6c2 --- /dev/null +++ b/go.mod @@ -0,0 +1,33 @@ +module github.com/cloudnative-pg/cloudnative-pg + +go 1.22.0 + +toolchain go1.23.2 + +require ( + github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a + github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + sigs.k8s.io/controller-runtime v0.19.0 +) + +require ( + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/text v0.18.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..7fe3975 --- /dev/null +++ b/go.sum @@ -0,0 +1,117 @@ +github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME= +github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= +github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 h1:qrxfp0vR+zqC+L1yTdQTqRHvnLLcVk4CdWB1RwLd8UE= +github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text 
v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/api/v1/backup_types.go b/pkg/api/v1/backup_types.go new file mode 100644 index 0000000..739e9a7 --- /dev/null +++ b/pkg/api/v1/backup_types.go @@ -0,0 +1,340 @@ +/* 
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ barmanApi "github.com/cloudnative-pg/barman-cloud/pkg/api"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BackupPhase is the phase of the backup
+type BackupPhase string
+
+const (
+ // BackupPhasePending means that the backup is still waiting to be started
+ BackupPhasePending = "pending"
+
+ // BackupPhaseStarted means that the backup is now running
+ BackupPhaseStarted = "started"
+
+ // BackupPhaseRunning means that the backup is now running
+ BackupPhaseRunning = "running"
+
+ // BackupPhaseFinalizing means that a consistent backup has been
+ // taken and the operator is waiting for it to be ready to be
+ // used to restore a cluster.
+ // This phase is used for VolumeSnapshot backups, when a
+ // VolumeSnapshotContent has already been provisioned, but it is
+ // still waiting for the `readyToUse` flag to be true.
+ BackupPhaseFinalizing = "finalizing"
+
+ // BackupPhaseCompleted means that the backup is now completed
+ BackupPhaseCompleted = "completed"
+
+ // BackupPhaseFailed means that the backup has failed
+ BackupPhaseFailed = "failed"
+
+ // BackupPhaseWalArchivingFailing means WAL archiving isn't working properly
+ BackupPhaseWalArchivingFailing = "walArchivingFailing"
+)
+
+// BarmanCredentials is an object containing the potential credentials for each cloud provider
+// +kubebuilder:object:generate:=false
+type BarmanCredentials = barmanApi.BarmanCredentials
+
+// AzureCredentials is the type for the credentials to be used to upload
+// files to Azure Blob Storage. The connection string contains all the needed
+// information. If the connection string is not specified, we'll need the
+// storage account name and also one (and only one) of:
+//
+// - storageKey
+// - storageSasToken
+//
+// - inheriting the credentials from the pod environment by setting inheritFromAzureAD to true
+// +kubebuilder:object:generate:=false
+type AzureCredentials = barmanApi.AzureCredentials
+
+// BarmanObjectStoreConfiguration contains the backup configuration
+// using Barman against an S3-compatible object storage
+// +kubebuilder:object:generate:=false
+type BarmanObjectStoreConfiguration = barmanApi.BarmanObjectStoreConfiguration
+
+// DataBackupConfiguration is the configuration of the backup of
+// the data directory
+// +kubebuilder:object:generate:=false
+type DataBackupConfiguration = barmanApi.DataBackupConfiguration
+
+// GoogleCredentials is the type for the Google Cloud Storage credentials.
+// This needs to be specified even if we run inside a GKE environment.
+// +kubebuilder:object:generate:=false
+type GoogleCredentials = barmanApi.GoogleCredentials
+
+// S3Credentials is the type for the credentials to be used to upload
+// files to S3.
It can be provided in two alternative ways: +// +// - explicitly passing accessKeyId and secretAccessKey +// +// - inheriting the role from the pod environment by setting inheritFromIAMRole to true +// +kubebuilder:object:generate:=false +type S3Credentials = barmanApi.S3Credentials + +// WalBackupConfiguration is the configuration of the backup of the +// WAL stream +// +kubebuilder:object:generate:=false +type WalBackupConfiguration = barmanApi.WalBackupConfiguration + +// BackupMethod defines the way of executing the physical base backups of +// the selected PostgreSQL instance +type BackupMethod string + +const ( + // BackupMethodVolumeSnapshot means using the volume snapshot + // Kubernetes feature + BackupMethodVolumeSnapshot BackupMethod = "volumeSnapshot" + + // BackupMethodBarmanObjectStore means using barman to backup the + // PostgreSQL cluster + BackupMethodBarmanObjectStore BackupMethod = "barmanObjectStore" + + // BackupMethodPlugin means that this backup should be handled by + // a plugin + BackupMethodPlugin BackupMethod = "plugin" +) + +// BackupSpec defines the desired state of Backup +type BackupSpec struct { + // The cluster to backup + Cluster LocalObjectReference `json:"cluster"` + + // The policy to decide which instance should perform this backup. If empty, + // it defaults to `cluster.spec.backup.target`. + // Available options are empty string, `primary` and `prefer-standby`. + // `primary` to have backups run always on primary instances, + // `prefer-standby` to have backups run preferably on the most updated + // standby, if available. + // +optional + // +kubebuilder:validation:Enum=primary;prefer-standby + Target BackupTarget `json:"target,omitempty"` + + // The backup method to be used, possible options are `barmanObjectStore`, + // `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
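+ //
+ // As a purely illustrative sketch (the `cluster-example` name is a
+ // placeholder and object metadata is omitted), a volume snapshot
+ // backup could be requested in Go with:
+ //
+ //    backup := Backup{
+ //        Spec: BackupSpec{
+ //            Cluster: LocalObjectReference{Name: "cluster-example"},
+ //            Method:  BackupMethodVolumeSnapshot,
+ //        },
+ //    }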
+ // +optional
+ // +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot;plugin
+ // +kubebuilder:default:=barmanObjectStore
+ Method BackupMethod `json:"method,omitempty"`
+
+ // Configuration parameters passed to the plugin managing this backup
+ // +optional
+ PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
+
+ // Whether the default type of backup with volume snapshots is
+ // online/hot (`true`, default) or offline/cold (`false`)
+ // Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ // +optional
+ Online *bool `json:"online,omitempty"`
+
+ // Configuration parameters to control the online/hot backup with volume snapshots
+ // Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ // +optional
+ OnlineConfiguration *OnlineConfiguration `json:"onlineConfiguration,omitempty"`
+}
+
+// BackupPluginConfiguration contains the backup configuration used by
+// the backup plugin
+type BackupPluginConfiguration struct {
+ // Name is the name of the plugin managing this backup
+ Name string `json:"name"`
+
+ // Parameters are the configuration parameters passed to the backup
+ // plugin for this backup
+ // +optional
+ Parameters map[string]string `json:"parameters,omitempty"`
+}
+
+// BackupSnapshotStatus contains the fields exclusive to the volumeSnapshot method backup
+type BackupSnapshotStatus struct {
+ // The elements list, populated with the gathered volume snapshots
+ // +optional
+ Elements []BackupSnapshotElementStatus `json:"elements,omitempty"`
+}
+
+// BackupSnapshotElementStatus is a volume snapshot that is part of a volume snapshot method backup
+type BackupSnapshotElementStatus struct {
+ // Name is the snapshot resource name
+ Name string `json:"name"`
+
+ // Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE
+ Type string `json:"type"`
+
+ // TablespaceName is the name of the snapshotted tablespace. Only set
+ // when type is PG_TABLESPACE
+ TablespaceName string `json:"tablespaceName,omitempty"`
+}
+
+// BackupStatus defines the observed state of Backup
+type BackupStatus struct {
+ // The potential credentials for each cloud provider
+ BarmanCredentials `json:",inline"`
+
+ // EndpointCA stores the CA bundle of the barman endpoint.
+ // Useful when using self-signed certificates to avoid
+ // errors with certificate issuer and barman-cloud-wal-archive.
+ // +optional
+ EndpointCA *SecretKeySelector `json:"endpointCA,omitempty"`
+
+ // Endpoint to be used to upload data to the cloud,
+ // overriding the automatic endpoint discovery
+ // +optional
+ EndpointURL string `json:"endpointURL,omitempty"`
+
+ // The path where to store the backup (e.g. s3://bucket/path/to/folder);
+ // this path, with different destination folders, will be used for WALs
+ // and for data. This may not be populated in case of errors.
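+ //
+ // For example, with `s3://bucket/backups` as the destination path and
+ // `cluster-example` as the server name (an illustrative value), Barman
+ // Cloud conventionally keeps WALs under
+ // `s3://bucket/backups/cluster-example/wals` and base backups under
+ // `s3://bucket/backups/cluster-example/base`.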
+ // +optional
+ DestinationPath string `json:"destinationPath,omitempty"`
+
+ // The server name on S3; the cluster name is used if this
+ // parameter is omitted
+ // +optional
+ ServerName string `json:"serverName,omitempty"`
+
+ // Encryption method required by the S3 API
+ // +optional
+ Encryption string `json:"encryption,omitempty"`
+
+ // The ID of the Barman backup
+ // +optional
+ BackupID string `json:"backupId,omitempty"`
+
+ // The name of the Barman backup
+ // +optional
+ BackupName string `json:"backupName,omitempty"`
+
+ // The last backup status
+ // +optional
+ Phase BackupPhase `json:"phase,omitempty"`
+
+ // When the backup was started
+ // +optional
+ StartedAt *metav1.Time `json:"startedAt,omitempty"`
+
+ // When the backup was terminated
+ // +optional
+ StoppedAt *metav1.Time `json:"stoppedAt,omitempty"`
+
+ // The starting WAL
+ // +optional
+ BeginWal string `json:"beginWal,omitempty"`
+
+ // The ending WAL
+ // +optional
+ EndWal string `json:"endWal,omitempty"`
+
+ // The starting xlog
+ // +optional
+ BeginLSN string `json:"beginLSN,omitempty"`
+
+ // The ending xlog
+ // +optional
+ EndLSN string `json:"endLSN,omitempty"`
+
+ // The detected error
+ // +optional
+ Error string `json:"error,omitempty"`
+
+ // Unused. Retained for compatibility with old versions.
+ // +optional
+ CommandOutput string `json:"commandOutput,omitempty"`
+
+ // The backup command output in case of error
+ // +optional
+ CommandError string `json:"commandError,omitempty"`
+
+ // Backup label file content as returned by Postgres in case of online (hot) backups
+ // +optional
+ BackupLabelFile []byte `json:"backupLabelFile,omitempty"`
+
+ // Tablespace map file content as returned by Postgres in case of online (hot) backups
+ // +optional
+ TablespaceMapFile []byte `json:"tablespaceMapFile,omitempty"`
+
+ // Information to identify the instance the backup has been taken from
+ // +optional
+ InstanceID *InstanceID `json:"instanceID,omitempty"`
+
+ // Status of the volumeSnapshot backup
+ // +optional
+ BackupSnapshotStatus BackupSnapshotStatus `json:"snapshotBackupStatus,omitempty"`
+
+ // The backup method being used
+ // +optional
+ Method BackupMethod `json:"method,omitempty"`
+
+ // Whether the backup was online/hot (`true`) or offline/cold (`false`)
+ Online *bool `json:"online,omitempty"`
+}
+
+// InstanceID contains the information to identify an instance
+type InstanceID struct {
+ // The pod name
+ // +optional
+ PodName string `json:"podName,omitempty"`
+ // The container ID
+ // +optional
+ ContainerID string `json:"ContainerID,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="Method",type="string",JSONPath=".spec.method"
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
+// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error"
+
+// Backup is the Schema for the backups API
+type Backup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Specification of the desired behavior of the backup.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec BackupSpec `json:"spec"`
+ // Most recently observed status of the backup.
This data may not be up to
+ // date. Populated by the system. Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status BackupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// BackupList contains a list of Backup
+type BackupList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // List of backups
+ Items []Backup `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Backup{}, &BackupList{})
+}
diff --git a/pkg/api/v1/base_types.go b/pkg/api/v1/base_types.go
new file mode 100644
index 0000000..6ae61a7
--- /dev/null
+++ b/pkg/api/v1/base_types.go
@@ -0,0 +1,50 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ machineryapi "github.com/cloudnative-pg/machinery/pkg/api"
+)
+
+// PodStatus represents the possible status of pods
+type PodStatus string
+
+const (
+ // PodHealthy means that a Pod is active and ready
+ PodHealthy = "healthy"
+
+ // PodReplicating means that a Pod is not ready yet but still active
+ PodReplicating = "replicating"
+
+ // PodFailed means that a Pod will not be scheduled again (deleted or evicted)
+ PodFailed = "failed"
+)
+
+// LocalObjectReference contains enough information to let you locate a
+// local object with a known type inside the same namespace
+// +kubebuilder:object:generate:=false
+type LocalObjectReference = machineryapi.LocalObjectReference
+
+// SecretKeySelector contains enough information to let you locate
+// the key of a Secret
+// +kubebuilder:object:generate:=false
+type SecretKeySelector = machineryapi.SecretKeySelector
+
+// ConfigMapKeySelector contains enough information to let you locate
+// the key of a ConfigMap
+// +kubebuilder:object:generate:=false
+type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector
diff --git a/pkg/api/v1/cluster_types.go b/pkg/api/v1/cluster_types.go
new file mode 100644
index 0000000..8717197
--- /dev/null
+++ b/pkg/api/v1/cluster_types.go
@@ -0,0 +1,2272 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "regexp"
+
+ monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // PrimaryPodDisruptionBudgetSuffix is the suffix appended to the cluster name
+ // to get the name of the PDB used for the cluster primary
+ PrimaryPodDisruptionBudgetSuffix = "-primary"
+
+ // ReplicationSecretSuffix is the suffix appended to the cluster name to
+ // get the name of the generated replication secret for PostgreSQL
+ ReplicationSecretSuffix = "-replication" // #nosec
+
+ // SuperUserSecretSuffix is the suffix appended to the cluster name to
+ // get the name of the PostgreSQL superuser secret
+ SuperUserSecretSuffix = "-superuser"
+
+ // ApplicationUserSecretSuffix is the suffix appended to the cluster name to
+ // get the name of the application user secret
+ ApplicationUserSecretSuffix = "-app"
+
+ // DefaultServerCaSecretSuffix is the suffix appended to the secret containing
+ // the generated CA for the cluster
+ DefaultServerCaSecretSuffix = "-ca"
+
+ // ClientCaSecretSuffix is the suffix appended to the secret containing
+ // the generated CA for the client certificates
+ ClientCaSecretSuffix = "-ca"
+
+ // ServerSecretSuffix is the suffix appended to the secret containing
+ // the generated server secret for PostgreSQL
+ ServerSecretSuffix = "-server"
+
+ // ServiceAnySuffix is the suffix appended to the cluster name to get the
+ // service name for every node (including non-ready ones)
+ ServiceAnySuffix = "-any"
+
+ // ServiceReadSuffix is the suffix appended to the cluster name to get the
+ // service name for every ready node that you can use to read data (including the primary)
+ ServiceReadSuffix = "-r"
+
+ // ServiceReadOnlySuffix is the suffix appended to the cluster name to get the
+ // service name for every ready node that you can use to read data (excluding the primary)
+ ServiceReadOnlySuffix = "-ro"
+
+ // ServiceReadWriteSuffix is the suffix appended to the cluster name to get
+ // the service name for every node that you can use to read and write
+ // data
+ ServiceReadWriteSuffix = "-rw"
+
+ // ClusterSecretSuffix is the suffix appended to the cluster name to
+ // get the name of the pull secret
+ ClusterSecretSuffix = "-pull-secret"
+
+ // WalArchiveVolumeSuffix is the suffix appended to the instance name to
+ // get the name of the PVC dedicated to WAL files.
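+ // For example, an instance named `cluster-example-1` (an illustrative
+ // name) gets a WAL PVC named `cluster-example-1-wal`.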
+ WalArchiveVolumeSuffix = "-wal"
+
+ // TablespaceVolumeInfix is the infix added between the instance name
+ // and tablespace name to get the name of PVC for a certain tablespace
+ TablespaceVolumeInfix = "-tbs-"
+
+ // StreamingReplicationUser is the name of the user we'll use for
+ // streaming replication purposes
+ StreamingReplicationUser = "streaming_replica"
+
+ // defaultPostgresUID is the default UID which is used by PostgreSQL
+ defaultPostgresUID = 26
+
+ // defaultPostgresGID is the default GID which is used by PostgreSQL
+ defaultPostgresGID = 26
+
+ // PodAntiAffinityTypeRequired is the label for required anti-affinity type
+ PodAntiAffinityTypeRequired = "required"
+
+ // PodAntiAffinityTypePreferred is the label for preferred anti-affinity type
+ PodAntiAffinityTypePreferred = "preferred"
+
+ // DefaultPgBouncerPoolerSecretSuffix is the suffix for the default pgbouncer Pooler secret
+ DefaultPgBouncerPoolerSecretSuffix = "-pooler"
+
+ // PendingFailoverMarker is used as target primary to signal that a failover is required
+ PendingFailoverMarker = "pending"
+
+ // PGBouncerPoolerUserName is the name of the role to be used for the
+ // PgBouncer pooler
+ PGBouncerPoolerUserName = "cnpg_pooler_pgbouncer"
+
+ // MissingWALDiskSpaceExitCode is the exit code the instance manager
+ // will use to signal that there's no more WAL disk space
+ MissingWALDiskSpaceExitCode = 4
+)
+
+// SnapshotOwnerReference defines the reference type for the owner of the snapshot.
+// This specifies which owner the processed resources should relate to.
+type SnapshotOwnerReference string
+
+// Constants to represent the allowed types for SnapshotOwnerReference.
+const (
+ // ShapshotOwnerReferenceNone indicates that the snapshot does not have any owner reference.
+ ShapshotOwnerReferenceNone SnapshotOwnerReference = "none"
+ // SnapshotOwnerReferenceBackup indicates that the snapshot is owned by the backup resource.
+ SnapshotOwnerReferenceBackup SnapshotOwnerReference = "backup"
+ // SnapshotOwnerReferenceCluster indicates that the snapshot is owned by the cluster resource.
+ SnapshotOwnerReferenceCluster SnapshotOwnerReference = "cluster"
+)
+
+// VolumeSnapshotConfiguration represents the configuration for the execution of snapshot backups.
+type VolumeSnapshotConfiguration struct {
+ // Labels are key-value pairs that will be added to the .metadata.labels of the snapshot resources.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+ // Annotations are key-value pairs that will be added to the .metadata.annotations of the snapshot resources.
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ // It is the default class for the other types if no specific class is present
+ // +optional
+ ClassName string `json:"className,omitempty"`
+ // WalClassName specifies the Snapshot Class to be used for the PG_WAL PersistentVolumeClaim.
+ // +optional
+ WalClassName string `json:"walClassName,omitempty"`
+ // TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ // defaults to the PGDATA Snapshot Class, if set + // +optional + TablespaceClassName map[string]string `json:"tablespaceClassName,omitempty"` + // SnapshotOwnerReference indicates the type of owner reference the snapshot should have + // +optional + // +kubebuilder:validation:Enum:=none;cluster;backup + // +kubebuilder:default:=none + SnapshotOwnerReference SnapshotOwnerReference `json:"snapshotOwnerReference,omitempty"` + + // Whether the default type of backup with volume snapshots is + // online/hot (`true`, default) or offline/cold (`false`) + // +optional + // +kubebuilder:default:=true + Online *bool `json:"online,omitempty"` + + // Configuration parameters to control the online/hot backup with volume snapshots + // +kubebuilder:default:={waitForArchive:true,immediateCheckpoint:false} + // +optional + OnlineConfiguration OnlineConfiguration `json:"onlineConfiguration,omitempty"` +} + +// OnlineConfiguration contains the configuration parameters for the online volume snapshot +type OnlineConfiguration struct { + // If false, the function will return immediately after the backup is completed, + // without waiting for WAL to be archived. + // This behavior is only useful with backup software that independently monitors WAL archiving. + // Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + // By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + // enabled. + // On a standby, this means that it will wait only when archive_mode = always. + // If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + // an immediate segment switch. + // +kubebuilder:default:=true + // +optional + WaitForArchive *bool `json:"waitForArchive,omitempty"` + + // Control whether the I/O workload for the backup initial checkpoint will + // be limited, according to the `checkpoint_completion_target` setting on + // the PostgreSQL server. If set to true, an immediate checkpoint will be + // used, meaning PostgreSQL will complete the checkpoint as soon as + // possible. `false` by default. 
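+ //
+ // As a minimal illustrative sketch (assuming the `ptr.To` helper from
+ // k8s.io/utils/ptr), an online configuration requesting an immediate
+ // checkpoint while still waiting for WAL archiving could be built as:
+ //
+ //    online := OnlineConfiguration{
+ //        WaitForArchive:      ptr.To(true),
+ //        ImmediateCheckpoint: ptr.To(true),
+ //    }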
+ // +optional + ImmediateCheckpoint *bool `json:"immediateCheckpoint,omitempty"` +} + +// ImageCatalogRef defines the reference to a major version in an ImageCatalog +type ImageCatalogRef struct { + // +kubebuilder:validation:XValidation:rule="self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'",message="Only image catalogs are supported" + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'postgresql.cnpg.io'",message="Only image catalogs are supported" + corev1.TypedLocalObjectReference `json:",inline"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Major is immutable" + // The major version of PostgreSQL we want to use from the ImageCatalog + Major int `json:"major"` +} + +// +kubebuilder:validation:XValidation:rule="!(has(self.imageCatalogRef) && has(self.imageName))",message="imageName and imageCatalogRef are mutually exclusive" + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + // Description of this PostgreSQL cluster + // +optional + Description string `json:"description,omitempty"` + + // Metadata that will be inherited by all objects related to the Cluster + // +optional + InheritedMetadata *EmbeddedObjectMetadata `json:"inheritedMetadata,omitempty"` + + // Name of the container image, supporting both tags (`:`) + // and digests for deterministic and repeatable deployments + // (`:@sha256:`) + // +optional + ImageName string `json:"imageName,omitempty"` + + // Defines the major PostgreSQL version we want to use within an ImageCatalog + // +optional + ImageCatalogRef *ImageCatalogRef `json:"imageCatalogRef,omitempty"` + + // Image pull policy. + // One of `Always`, `Never` or `IfNotPresent`. + // If not defined, it defaults to `IfNotPresent`. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // If specified, the pod will be dispatched by specified Kubernetes + // scheduler. If not specified, the pod will be dispatched by the default + // scheduler. More info: + // https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + // +optional + SchedulerName string `json:"schedulerName,omitempty"` + + // The UID of the `postgres` user inside the image, defaults to `26` + // +kubebuilder:default:=26 + // +optional + PostgresUID int64 `json:"postgresUID,omitempty"` + + // The GID of the `postgres` user inside the image, defaults to `26` + // +kubebuilder:default:=26 + // +optional + PostgresGID int64 `json:"postgresGID,omitempty"` + + // Number of instances required in the cluster + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default:=1 + Instances int `json:"instances"` + + // Minimum number of instances required in synchronous replication with the + // primary. Undefined or 0 allow writes to complete when no standby is + // available. + // +kubebuilder:default:=0 + // +kubebuilder:validation:Minimum=0 + // +optional + MinSyncReplicas int `json:"minSyncReplicas,omitempty"` + + // The target value for the synchronous replication quorum, that can be + // decreased if the number of ready standbys is lower than this. + // Undefined or 0 disable synchronous replication. 
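+ //
+ // For example, with `instances: 3`, `minSyncReplicas: 1` and
+ // `maxSyncReplicas: 2`, the operator targets two synchronous standbys
+ // and lets the quorum shrink down to one as ready standbys become
+ // unavailable.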
+ // +kubebuilder:default:=0 + // +kubebuilder:validation:Minimum=0 + // +optional + MaxSyncReplicas int `json:"maxSyncReplicas,omitempty"` + + // Configuration of the PostgreSQL server + // +optional + PostgresConfiguration PostgresConfiguration `json:"postgresql,omitempty"` + + // Replication slots management configuration + // +kubebuilder:default:={"highAvailability":{"enabled":true}} + // +optional + ReplicationSlots *ReplicationSlotsConfiguration `json:"replicationSlots,omitempty"` + + // Instructions to bootstrap this cluster + // +optional + Bootstrap *BootstrapConfiguration `json:"bootstrap,omitempty"` + + // Replica cluster configuration + // +optional + ReplicaCluster *ReplicaClusterConfiguration `json:"replica,omitempty"` + + // The secret containing the superuser password. If not defined a new + // secret will be created with a randomly generated password + // +optional + SuperuserSecret *LocalObjectReference `json:"superuserSecret,omitempty"` + + // When this option is enabled, the operator will use the `SuperuserSecret` + // to update the `postgres` user password (if the secret is + // not present, the operator will automatically create one). When this + // option is disabled, the operator will ignore the `SuperuserSecret` content, delete + // it when automatically created, and then blank the password of the `postgres` + // user by setting it to `NULL`. Disabled by default. + // +kubebuilder:default:=false + // +optional + EnableSuperuserAccess *bool `json:"enableSuperuserAccess,omitempty"` + + // The configuration for the CA and related certificates + // +optional + Certificates *CertificatesConfiguration `json:"certificates,omitempty"` + + // The list of pull secrets to be used to pull the images + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // Configuration of the storage of the instances + // +optional + StorageConfiguration StorageConfiguration `json:"storage,omitempty"` + + // Configure the generation of the service account + // +optional + ServiceAccountTemplate *ServiceAccountTemplate `json:"serviceAccountTemplate,omitempty"` + + // Configuration of the storage for PostgreSQL WAL (Write-Ahead Log) + // +optional + WalStorage *StorageConfiguration `json:"walStorage,omitempty"` + + // EphemeralVolumeSource allows the user to configure the source of ephemeral volumes. + // +optional + EphemeralVolumeSource *corev1.EphemeralVolumeSource `json:"ephemeralVolumeSource,omitempty"` + + // The time in seconds that is allowed for a PostgreSQL instance to + // successfully start up (default 3600). + // The startup probe failure threshold is derived from this value using the formula: + // ceiling(startDelay / 10). + // +kubebuilder:default:=3600 + // +optional + MaxStartDelay int32 `json:"startDelay,omitempty"` + + // The time in seconds that is allowed for a PostgreSQL instance to + // gracefully shutdown (default 1800) + // +kubebuilder:default:=1800 + // +optional + MaxStopDelay int32 `json:"stopDelay,omitempty"` + + // The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + // Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + // (that is: `stopDelay` - `smartShutdownTimeout`). + // +kubebuilder:default:=180 + // +optional + SmartShutdownTimeout *int32 `json:"smartShutdownTimeout,omitempty"` + + // The time in seconds that is allowed for a primary PostgreSQL instance + // to gracefully shutdown during a switchover. 
+ // Default value is 3600 seconds (1 hour).
+ // +kubebuilder:default:=3600
+ // +optional
+ MaxSwitchoverDelay int32 `json:"switchoverDelay,omitempty"`
+
+ // The amount of time (in seconds) to wait before triggering a failover
+ // after the primary PostgreSQL instance in the cluster was detected
+ // to be unhealthy
+ // +kubebuilder:default:=0
+ // +optional
+ FailoverDelay int32 `json:"failoverDelay,omitempty"`
+
+ // LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+ // to successfully respond to the liveness probe (default 30).
+ // The Liveness probe failure threshold is derived from this value using the formula:
+ // ceiling(livenessProbe / 10).
+ // +optional
+ LivenessProbeTimeout *int32 `json:"livenessProbeTimeout,omitempty"`
+
+ // Affinity/Anti-affinity rules for Pods
+ // +optional
+ Affinity AffinityConfiguration `json:"affinity,omitempty"`
+
+ // TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ // More info:
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ // +optional
+ TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+
+ // Resource requirements of every generated Pod. Please refer to
+ // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ // for more information.
+ // +optional
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+
+ // EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ // volumes
+ EphemeralVolumesSizeLimit *EphemeralVolumesSizeLimitConfiguration `json:"ephemeralVolumesSizeLimit,omitempty"`
+
+ // Name of the priority class which will be used in every generated Pod. If the PriorityClass
+ // specified does not exist, the pod will not be scheduled.
Please refer to + // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + // for more information + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // Deployment strategy to follow to upgrade the primary server during a rolling + // update procedure, after all replicas have been successfully updated: + // it can be automated (`unsupervised` - default) or manual (`supervised`) + // +kubebuilder:default:=unsupervised + // +kubebuilder:validation:Enum:=unsupervised;supervised + // +optional + PrimaryUpdateStrategy PrimaryUpdateStrategy `json:"primaryUpdateStrategy,omitempty"` + + // Method to follow to upgrade the primary server during a rolling + // update procedure, after all replicas have been successfully updated: + // it can be with a switchover (`switchover`) or in-place (`restart` - default) + // +kubebuilder:default:=restart + // +kubebuilder:validation:Enum:=switchover;restart + // +optional + PrimaryUpdateMethod PrimaryUpdateMethod `json:"primaryUpdateMethod,omitempty"` + + // The configuration to be used for backups + // +optional + Backup *BackupConfiguration `json:"backup,omitempty"` + + // Define a maintenance window for the Kubernetes nodes + // +optional + NodeMaintenanceWindow *NodeMaintenanceWindow `json:"nodeMaintenanceWindow,omitempty"` + + // The configuration of the monitoring infrastructure of this cluster + // +optional + Monitoring *MonitoringConfiguration `json:"monitoring,omitempty"` + + // The list of external clusters which are used in the configuration + // +optional + ExternalClusters []ExternalCluster `json:"externalClusters,omitempty"` + + // The instances' log level, one of the following values: error, warning, info (default), debug, trace + // +kubebuilder:default:=info + // +kubebuilder:validation:Enum:=error;warning;info;debug;trace + // +optional + LogLevel string `json:"logLevel,omitempty"` + + // Template to be used to define projected volumes, projected volumes will be mounted + // under `/projected` base folder + // +optional + ProjectedVolumeTemplate *corev1.ProjectedVolumeSource `json:"projectedVolumeTemplate,omitempty"` + + // Env follows the Env format to pass environment variables + // to the pods created in the cluster + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []corev1.EnvVar `json:"env,omitempty"` + + // EnvFrom follows the EnvFrom format to pass environment variables + // sources to the pods to be used by Env + // +optional + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + + // The configuration that is used by the portions of PostgreSQL that are managed by the instance manager + // +optional + Managed *ManagedConfiguration `json:"managed,omitempty"` + + // The SeccompProfile applied to every Pod and Container. + // Defaults to: `RuntimeDefault` + // +optional + SeccompProfile *corev1.SeccompProfile `json:"seccompProfile,omitempty"` + + // The tablespaces configuration + // +optional + Tablespaces []TablespaceConfiguration `json:"tablespaces,omitempty"` + + // Manage the `PodDisruptionBudget` resources within the cluster. When + // configured as `true` (default setting), the pod disruption budgets + // will safeguard the primary node from being terminated. Conversely, + // setting it to `false` will result in the absence of any + // `PodDisruptionBudget` resource, permitting the shutdown of all nodes + // hosting the PostgreSQL cluster. 
This latter configuration is
+ // advisable for any PostgreSQL cluster employed for
+ // development/staging purposes.
+ // +kubebuilder:default:=true
+ // +optional
+ EnablePDB *bool `json:"enablePDB,omitempty"`
+
+ // The plugins configuration, containing
+ // any plugin to be loaded with the corresponding configuration
+ Plugins PluginConfigurationList `json:"plugins,omitempty"`
+}
+
+// PluginConfigurationList represents a set of plugins with their
+// configuration parameters
+type PluginConfigurationList []PluginConfiguration
+
+const (
+ // PhaseSwitchover when a cluster is changing the primary node
+ PhaseSwitchover = "Switchover in progress"
+
+ // PhaseFailOver in case a pod is missing and we need to change the primary
+ PhaseFailOver = "Failing over"
+
+ // PhaseFirstPrimary for a starting cluster
+ PhaseFirstPrimary = "Setting up primary"
+
+ // PhaseCreatingReplica every time we add a new replica
+ PhaseCreatingReplica = "Creating a new replica"
+
+ // PhaseUpgrade upgrade in progress
+ PhaseUpgrade = "Upgrading cluster"
+
+ // PhaseUpgradeDelayed is set when a cluster needs to be upgraded
+ // but the operation is being delayed by the operator configuration
+ PhaseUpgradeDelayed = "Cluster upgrade delayed"
+
+ // PhaseWaitingForUser sets the status to wait for an action from the user
+ PhaseWaitingForUser = "Waiting for user action"
+
+ // PhaseInplacePrimaryRestart for a cluster restarting the primary instance in-place
+ PhaseInplacePrimaryRestart = "Primary instance is being restarted in-place"
+
+ // PhaseInplaceDeletePrimaryRestart for a cluster restarting the primary instance without a switchover
+ PhaseInplaceDeletePrimaryRestart = "Primary instance is being restarted without a switchover"
+
+ // PhaseHealthy for a cluster doing nothing
+ PhaseHealthy = "Cluster in healthy state"
+
+ // PhaseUnknownPlugin is triggered when the required CNPG-i plugin has not been
+ // loaded yet
+ PhaseUnknownPlugin = "Cluster cannot proceed to reconciliation due to an unknown plugin being required"
+
+ // PhaseImageCatalogError is triggered when the cluster cannot select the image to
+ // apply because of an invalid or incomplete catalog
+ PhaseImageCatalogError = "Cluster has incomplete or invalid image catalog"
+
+ // PhaseUnrecoverable for an unrecoverable cluster
+ PhaseUnrecoverable = "Cluster is in an unrecoverable state, needs manual intervention"
+
+ // PhaseArchitectureBinaryMissing is the error phase describing a missing architecture
+ PhaseArchitectureBinaryMissing = "Cluster cannot execute instance online upgrade due to missing architecture binary"
+
+ // PhaseWaitingForInstancesToBeActive is a waiting phase that is triggered when an instance pod is not active
+ PhaseWaitingForInstancesToBeActive = "Waiting for the instances to become active"
+
+ // PhaseOnlineUpgrading for when the instance manager is being upgraded in place
+ PhaseOnlineUpgrading = "Online upgrade in progress"
+
+ // PhaseApplyingConfiguration is set by the instance manager when a configuration
+ // change is detected
+ PhaseApplyingConfiguration = "Applying configuration"
+
+ // PhaseReplicaClusterPromotion is the phase during which a replica cluster
+ // is promoted to a primary cluster
+ PhaseReplicaClusterPromotion = "Promoting to primary cluster"
+
+ // PhaseCannotCreateClusterObjects is set by the operator when it is unable to create cluster resources
+ PhaseCannotCreateClusterObjects = "Unable to create required cluster objects"
+)
+
+// EphemeralVolumesSizeLimitConfiguration contains the configuration of the ephemeral
+// storage
+type EphemeralVolumesSizeLimitConfiguration struct {
+ // Shm is the size limit of the shared memory volume
+ Shm *resource.Quantity `json:"shm,omitempty"`
+
+ // TemporaryData is the size limit of the temporary data volume
+ TemporaryData *resource.Quantity `json:"temporaryData,omitempty"`
+}
+
+// ServiceAccountTemplate contains the template needed to generate the service accounts
+type ServiceAccountTemplate struct {
+ // Metadata are the metadata to be used for the generated
+ // service account
+ Metadata Metadata `json:"metadata"`
+}
+
+// PodTopologyLabels represents the topology of a Pod. map[labelName]labelValue
+type PodTopologyLabels map[string]string
+
+// PodName is the name of a Pod
+type PodName string
+
+// Topology contains the cluster topology
+type Topology struct {
+ // Instances contains the pod topology of the instances
+ // +optional
+ Instances map[PodName]PodTopologyLabels `json:"instances,omitempty"`
+
+ // NodesUsed represents the count of distinct nodes accommodating the instances.
+ // A value of '1' suggests that all instances are hosted on a single node,
+ // implying the absence of High Availability (HA). Ideally, this value should
+ // be the same as the number of instances in the Postgres HA cluster, implying
+ // shared nothing architecture on the compute side.
+ // +optional
+ NodesUsed int32 `json:"nodesUsed,omitempty"`
+
+ // SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ // in synchronous replica election in case of failures
+ // +optional
+ SuccessfullyExtracted bool `json:"successfullyExtracted,omitempty"`
+}
+
+// RoleStatus represents the status of a managed role in the cluster
+type RoleStatus string
+
+const (
+ // RoleStatusReconciled indicates the role in DB matches the Spec
+ RoleStatusReconciled RoleStatus = "reconciled"
+ // RoleStatusNotManaged indicates the role is not in the Spec, therefore not managed
+ RoleStatusNotManaged RoleStatus = "not-managed"
+ // RoleStatusPendingReconciliation indicates the role in Spec requires update/creation in DB
+ RoleStatusPendingReconciliation RoleStatus = "pending-reconciliation"
+ // RoleStatusReserved indicates this is one of the roles reserved by the operator. E.g.
`postgres` + RoleStatusReserved RoleStatus = "reserved" +) + +// PasswordState represents the state of the password of a managed RoleConfiguration +type PasswordState struct { + // the last transaction ID to affect the role definition in PostgreSQL + // +optional + TransactionID int64 `json:"transactionID,omitempty"` + // the resource version of the password secret + // +optional + SecretResourceVersion string `json:"resourceVersion,omitempty"` +} + +// ManagedRoles tracks the status of a cluster's managed roles +type ManagedRoles struct { + // ByStatus gives the list of roles in each state + // +optional + ByStatus map[RoleStatus][]string `json:"byStatus,omitempty"` + + // CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + // with an explanation of the cause + // +optional + CannotReconcile map[string][]string `json:"cannotReconcile,omitempty"` + + // PasswordStatus gives the last transaction id and password secret version for each managed role + // +optional + PasswordStatus map[string]PasswordState `json:"passwordStatus,omitempty"` +} + +// TablespaceState represents the state of a tablespace in a cluster +type TablespaceState struct { + // Name is the name of the tablespace + Name string `json:"name"` + + // Owner is the PostgreSQL user owning the tablespace + // +optional + Owner string `json:"owner,omitempty"` + + // State is the latest reconciliation state + State TablespaceStatus `json:"state"` + + // Error is the reconciliation error, if any + // +optional + Error string `json:"error,omitempty"` +} + +// TablespaceStatus represents the status of a tablespace in the cluster +type TablespaceStatus string + +const ( + // TablespaceStatusReconciled indicates the tablespace in DB matches the Spec + TablespaceStatusReconciled TablespaceStatus = "reconciled" + + // TablespaceStatusPendingReconciliation indicates the tablespace in Spec requires creation in the DB + TablespaceStatusPendingReconciliation TablespaceStatus = "pending" +) + +// AvailableArchitecture represents the state of a cluster's architecture +type AvailableArchitecture struct { + // GoArch is the name of the executable architecture + GoArch string `json:"goArch"` + + // Hash is the hash of the executable + Hash string `json:"hash"` +} + +// ClusterStatus defines the observed state of Cluster +type ClusterStatus struct { + // The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods. + // +optional + Instances int `json:"instances,omitempty"` + + // The total number of ready instances in the cluster. It is equal to the number of ready instance pods. + // +optional + ReadyInstances int `json:"readyInstances,omitempty"` + + // InstancesStatus indicates in which status the instances are + // +optional + InstancesStatus map[PodStatus][]string `json:"instancesStatus,omitempty"` + + // The reported state of the instances during the last reconciliation loop + // +optional + InstancesReportedState map[PodName]InstanceReportedState `json:"instancesReportedState,omitempty"` + + // ManagedRolesStatus reports the state of the managed roles in the cluster + // +optional + ManagedRolesStatus ManagedRoles `json:"managedRolesStatus,omitempty"` + + // TablespacesStatus reports the state of the declarative tablespaces in the cluster + // +optional + TablespacesStatus []TablespaceState `json:"tablespacesStatus,omitempty"` + + // The timeline of the Postgres cluster + // +optional + TimelineID int `json:"timelineID,omitempty"` + + // Instances topology. 
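+ // As an illustrative example, a single-instance topology might be
+ // reported as:
+ //
+ //    Instances: map[PodName]PodTopologyLabels{
+ //        "cluster-example-1": {"topology.kubernetes.io/zone": "zone-a"},
+ //    }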
+	// +optional
+	Topology Topology `json:"topology,omitempty"`
+
+	// ID of the latest generated node (used to avoid node name clashing)
+	// +optional
+	LatestGeneratedNode int `json:"latestGeneratedNode,omitempty"`
+
+	// Current primary instance
+	// +optional
+	CurrentPrimary string `json:"currentPrimary,omitempty"`
+
+	// Target primary instance; this is different from the current primary
+	// only during a switchover or a failover
+	// +optional
+	TargetPrimary string `json:"targetPrimary,omitempty"`
+
+	// LastPromotionToken is the last verified promotion token that
+	// was used to promote a replica cluster
+	LastPromotionToken string `json:"lastPromotionToken,omitempty"`
+
+	// How many PVCs have been created by this cluster
+	// +optional
+	PVCCount int32 `json:"pvcCount,omitempty"`
+
+	// How many Jobs have been created by this cluster
+	// +optional
+	JobCount int32 `json:"jobCount,omitempty"`
+
+	// List of all the PVCs created by this cluster that are still available
+	// and not attached to a Pod
+	// +optional
+	DanglingPVC []string `json:"danglingPVC,omitempty"`
+
+	// List of all the PVCs that have the ResizingPVC condition.
+	// +optional
+	ResizingPVC []string `json:"resizingPVC,omitempty"`
+
+	// List of all the PVCs that are being initialized by this cluster
+	// +optional
+	InitializingPVC []string `json:"initializingPVC,omitempty"`
+
+	// List of all the PVCs that are neither dangling nor initializing
+	// +optional
+	HealthyPVC []string `json:"healthyPVC,omitempty"`
+
+	// List of all the PVCs that are unusable because another PVC is missing
+	// +optional
+	UnusablePVC []string `json:"unusablePVC,omitempty"`
+
+	// Current write service
+	// +optional
+	WriteService string `json:"writeService,omitempty"`
+
+	// Current read service
+	// +optional
+	ReadService string `json:"readService,omitempty"`
+
+	// Current phase of the cluster
+	// +optional
+	Phase string `json:"phase,omitempty"`
+
+	// Reason for the current phase
+	// +optional
+	PhaseReason string `json:"phaseReason,omitempty"`
+
+	// The list of resource versions of the secrets
+	// managed by the operator. Every change here is done in the
+	// interest of the instance manager, which will refresh the
+	// secret data
+	// +optional
+	SecretsResourceVersion SecretsResourceVersion `json:"secretsResourceVersion,omitempty"`
+
+	// The list of resource versions of the configmaps,
+	// managed by the operator. Every change here is done in the
+	// interest of the instance manager, which will refresh the
+	// configmap data
+	// +optional
+	ConfigMapResourceVersion ConfigMapResourceVersion `json:"configMapResourceVersion,omitempty"`
+
+	// The configuration for the CA and related certificates, initialized with defaults.
+	// +optional
+	Certificates CertificatesStatus `json:"certificates,omitempty"`
+
+	// The first recoverability point, stored as a date in RFC3339 format.
+	// This field is calculated from the content of FirstRecoverabilityPointByMethod
+	// +optional
+	FirstRecoverabilityPoint string `json:"firstRecoverabilityPoint,omitempty"`
+
+	// The first recoverability point, stored as a date in RFC3339 format, per backup method type
+	// +optional
+	FirstRecoverabilityPointByMethod map[BackupMethod]metav1.Time `json:"firstRecoverabilityPointByMethod,omitempty"`
+
+	// Last successful backup, stored as a date in RFC3339 format.
+	// This field is calculated from the content of LastSuccessfulBackupByMethod
+	// +optional
+	LastSuccessfulBackup string `json:"lastSuccessfulBackup,omitempty"`
+
+	// Last successful backup, stored as a date in RFC3339 format, per backup method type
+	// +optional
+	LastSuccessfulBackupByMethod map[BackupMethod]metav1.Time `json:"lastSuccessfulBackupByMethod,omitempty"`
+
+	// Last failed backup, stored as a date in RFC3339 format
+	// +optional
+	LastFailedBackup string `json:"lastFailedBackup,omitempty"`
+
+	// The commit hash of the operator build that is running
+	// +optional
+	CommitHash string `json:"cloudNativePGCommitHash,omitempty"`
+
+	// The timestamp when the last actual promotion to primary occurred
+	// +optional
+	CurrentPrimaryTimestamp string `json:"currentPrimaryTimestamp,omitempty"`
+
+	// The timestamp when the primary was detected to be unhealthy.
+	// This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+	// +optional
+	CurrentPrimaryFailingSinceTimestamp string `json:"currentPrimaryFailingSinceTimestamp,omitempty"`
+
+	// The timestamp when the last request for a new primary occurred
+	// +optional
+	TargetPrimaryTimestamp string `json:"targetPrimaryTimestamp,omitempty"`
+
+	// The integration needed by poolers referencing the cluster
+	// +optional
+	PoolerIntegrations *PoolerIntegrations `json:"poolerIntegrations,omitempty"`
+
+	// The hash of the binary of the operator
+	// +optional
+	OperatorHash string `json:"cloudNativePGOperatorHash,omitempty"`
+
+	// AvailableArchitectures reports the available architectures of a cluster
+	// +optional
+	AvailableArchitectures []AvailableArchitecture `json:"availableArchitectures,omitempty"`
+
+	// Conditions for cluster object
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// List of instance names in the cluster
+	// +optional
+	InstanceNames []string `json:"instanceNames,omitempty"`
+
+	// OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster
+	// +optional
+	OnlineUpdateEnabled bool `json:"onlineUpdateEnabled,omitempty"`
+
+	// AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster
+	// +optional
+	AzurePVCUpdateEnabled bool `json:"azurePVCUpdateEnabled,omitempty"`
+
+	// Image contains the image name used by the pods
+	// +optional
+	Image string `json:"image,omitempty"`
+
+	// PluginStatus is the status of the loaded plugins
+	PluginStatus []PluginStatus `json:"pluginStatus,omitempty"`
+
+	// SwitchReplicaClusterStatus is the status of the switch to replica cluster
+	// +optional
+	SwitchReplicaClusterStatus SwitchReplicaClusterStatus `json:"switchReplicaClusterStatus,omitempty"`
+
+	// DemotionToken is a JSON token containing the information
+	// from pg_controldata such as Database system identifier, Latest checkpoint's
+	// TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+	// WAL file, and Time of latest checkpoint
+	// +optional
+	DemotionToken string `json:"demotionToken,omitempty"`
+}
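For consumers of `ClusterStatus`, a minimal sketch of checking the `Ready` condition with the apimachinery condition helpers; `ConditionClusterReady` is the constant defined further below:

import (
	"k8s.io/apimachinery/pkg/api/meta"
)

// isClusterReady reports whether the Ready condition of a ClusterStatus is
// currently true, using the standard condition helpers.
func isClusterReady(status ClusterStatus) bool {
	return meta.IsStatusConditionTrue(status.Conditions, string(ConditionClusterReady))
}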
+
+// SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster
+type SwitchReplicaClusterStatus struct {
+	// InProgress indicates if there is an ongoing procedure of switching a cluster to a replica cluster.
+	// +optional
+	InProgress bool `json:"inProgress,omitempty"`
+}
+
+// InstanceReportedState describes the last reported state of an instance during a reconciliation loop
+type InstanceReportedState struct {
+	// indicates if an instance is the primary one
+	IsPrimary bool `json:"isPrimary"`
+	// indicates the TimeLineID the instance is on
+	// +optional
+	TimeLineID int `json:"timeLineID,omitempty"`
+}
+
+// ClusterConditionType defines types of cluster conditions
+type ClusterConditionType string
+
+// These are valid conditions of a Cluster; some of them could be owned by the
+// instance manager and some by the reconciler.
+const (
+	// ConditionContinuousArchiving represents whether WAL archiving is working
+	ConditionContinuousArchiving ClusterConditionType = "ContinuousArchiving"
+	// ConditionBackup represents the last backup's status
+	ConditionBackup ClusterConditionType = "LastBackupSucceeded"
+	// ConditionClusterReady represents whether a cluster is Ready
+	ConditionClusterReady ClusterConditionType = "Ready"
+)
+
+// ConditionStatus defines conditions of resources
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means Kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ConditionReason defines the reason why a certain
+// condition changed
+type ConditionReason string
+
+const (
+	// ConditionBackupStarted means that the condition changed because the backup
+	// started
+	ConditionBackupStarted ConditionReason = "BackupStarted"
+
+	// ConditionReasonLastBackupSucceeded means that the condition changed because the last backup
+	// has been taken successfully
+	ConditionReasonLastBackupSucceeded ConditionReason = "LastBackupSucceeded"
+
+	// ConditionReasonLastBackupFailed means that the condition changed because the last backup
+	// failed
+	ConditionReasonLastBackupFailed ConditionReason = "LastBackupFailed"
+
+	// ConditionReasonContinuousArchivingSuccess means that the condition changed because the
+	// WAL archiving was working correctly
+	ConditionReasonContinuousArchivingSuccess ConditionReason = "ContinuousArchivingSuccess"
+
+	// ConditionReasonContinuousArchivingFailing means that the condition has changed because
+	// the WAL archiving is not working correctly
+	ConditionReasonContinuousArchivingFailing ConditionReason = "ContinuousArchivingFailing"
+
+	// ClusterReady means that the condition changed because the cluster is ready and working properly
+	ClusterReady ConditionReason = "ClusterIsReady"
+
+	// ClusterIsNotReady means that the condition changed because the cluster is not ready
+	ClusterIsNotReady ConditionReason = "ClusterIsNotReady"
+
+	// DetachedVolume is the reason that is set when we do a rolling upgrade to add a PVC volume to a cluster
+	DetachedVolume ConditionReason = "DetachedVolume"
+)
+
+// EmbeddedObjectMetadata contains metadata to be inherited by all resources related to a Cluster
+type EmbeddedObjectMetadata struct {
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// PoolerIntegrations encapsulates the needed integration for the poolers referencing the cluster
+type PoolerIntegrations struct {
+	// +optional
+	PgBouncerIntegration PgBouncerIntegrationStatus `json:"pgBouncerIntegration,omitempty"`
+}
+
+// PgBouncerIntegrationStatus encapsulates the needed integration for the pgbouncer poolers referencing the cluster
+type PgBouncerIntegrationStatus struct {
+	// +optional
+	Secrets []string `json:"secrets,omitempty"`
+}
+
+// ReplicaClusterConfiguration encapsulates the configuration of a replica
+// cluster
+type ReplicaClusterConfiguration struct {
+	// Self defines the name of this cluster. It is used to determine if this is a primary
+	// or a replica cluster, comparing it with `primary`
+	Self string `json:"self,omitempty"`
+
+	// Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+	// topology specified in externalClusters
+	Primary string `json:"primary,omitempty"`
+
+	// The name of the external cluster which is the replication origin
+	// +kubebuilder:validation:MinLength=1
+	Source string `json:"source"`
+
+	// If replica mode is enabled, this cluster will be a replica of an
+	// existing cluster. A replica cluster can be created from a recovery
+	// object store or via streaming through pg_basebackup.
+	// Refer to the Replica clusters page of the documentation for more information.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// A demotion token generated by an external cluster used to
+	// check if the promotion requirements are met.
+	PromotionToken string `json:"promotionToken,omitempty"`
+
+	// When replica mode is enabled, this parameter allows you to replay
+	// transactions only when the system time is at least the configured
+	// time past the commit time. This provides an opportunity to correct
+	// data loss errors. Note that when this parameter is set, a promotion
+	// token cannot be used.
+	MinApplyDelay *metav1.Duration `json:"minApplyDelay,omitempty"`
+}
+
+// DefaultReplicationSlotsUpdateInterval is the default in seconds for the replication slots update interval
+const DefaultReplicationSlotsUpdateInterval = 30
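A sketch of how `Self` and `Primary` could be compared to decide the role of a cluster in the distributed topology described above; treating an unset `Primary` as "this cluster is primary" is an assumption made for illustration only:

// isPrimaryCluster sketches the Self/Primary comparison described in
// ReplicaClusterConfiguration (illustrative, not the operator's exact logic).
func isPrimaryCluster(r ReplicaClusterConfiguration) bool {
	return r.Primary == "" || r.Primary == r.Self
}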
+
+// DefaultReplicationSlotsHASlotPrefix is the default prefix for names of replication slots used for HA.
+const DefaultReplicationSlotsHASlotPrefix = "_cnpg_"
+
+// SynchronizeReplicasConfiguration contains the configuration for the synchronization of user defined
+// physical replication slots
+type SynchronizeReplicasConfiguration struct {
+	// When set to true, every replication slot that is on the primary is synchronized on each standby
+	// +kubebuilder:default:=true
+	Enabled *bool `json:"enabled"`
+
+	// List of regular expression patterns to match the names of replication slots to be excluded (by default empty)
+	// +optional
+	ExcludePatterns []string `json:"excludePatterns,omitempty"`
+
+	synchronizeReplicasCache `json:"-"`
+}
+
+// synchronizeReplicasCache contains the result of the regex compilation
+// +kubebuilder:object:generate:=false
+type synchronizeReplicasCache struct {
+	compiledPatterns []regexp.Regexp `json:"-"`
+
+	compiled bool `json:"-"`
+
+	compileErrors []error `json:"-"`
+}
+
+// ReplicationSlotsConfiguration encapsulates the configuration
+// of replication slots
+type ReplicationSlotsConfiguration struct {
+	// Replication slots for high availability configuration
+	// +kubebuilder:default:={"enabled": true}
+	// +optional
+	HighAvailability *ReplicationSlotsHAConfiguration `json:"highAvailability,omitempty"`
+
+	// Standby will update the status of the local replication slots
+	// every `updateInterval` seconds (default 30).
+	// +kubebuilder:default:=30
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	UpdateInterval int `json:"updateInterval,omitempty"`
+
+	// Configures the synchronization of the user defined physical replication slots
+	// +optional
+	SynchronizeReplicas *SynchronizeReplicasConfiguration `json:"synchronizeReplicas,omitempty"`
+}
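A minimal sketch of how `ExcludePatterns` could be applied when deciding which user-defined replication slots to synchronize on standbys; the operator caches compiled patterns (see `synchronizeReplicasCache` above), while this sketch compiles them on the fly:

import "regexp"

// filterSlots returns the slot names not matched by any exclude pattern.
// Compiling on every call is for illustration only.
func filterSlots(cfg SynchronizeReplicasConfiguration, slots []string) ([]string, error) {
	patterns := make([]*regexp.Regexp, 0, len(cfg.ExcludePatterns))
	for _, p := range cfg.ExcludePatterns {
		re, err := regexp.Compile(p)
		if err != nil {
			return nil, err
		}
		patterns = append(patterns, re)
	}
	kept := make([]string, 0, len(slots))
slotLoop:
	for _, slot := range slots {
		for _, re := range patterns {
			if re.MatchString(slot) {
				continue slotLoop
			}
		}
		kept = append(kept, slot)
	}
	return kept, nil
}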
+
+// ReplicationSlotsHAConfiguration encapsulates the configuration
+// of the replication slots that are automatically managed by
+// the operator to control the streaming replication connections
+// with the standby instances for high availability (HA) purposes.
+// Replication slots are a PostgreSQL feature that makes sure
+// that PostgreSQL automatically keeps WAL files in the primary
+// when a streaming client (in this specific case a replica that
+// is part of the HA cluster) gets disconnected.
+type ReplicationSlotsHAConfiguration struct {
+	// If enabled (default), the operator will automatically manage replication slots
+	// on the primary instance and use them in streaming replication
+	// connections with all the standby instances that are part of the HA
+	// cluster. If disabled, the operator will not take advantage
+	// of replication slots in streaming connections with the replicas.
+	// This feature also controls replication slots in replica clusters,
+	// from the designated primary to its cascading replicas.
+	// +optional
+	// +kubebuilder:default:=true
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Prefix for replication slots managed by the operator for HA.
+	// It may only contain lower case letters, numbers, and the underscore character.
+	// This can only be set at creation time. By default set to `_cnpg_`.
+	// +kubebuilder:default:=_cnpg_
+	// +kubebuilder:validation:Pattern=^[0-9a-z_]*$
+	// +optional
+	SlotPrefix string `json:"slotPrefix,omitempty"`
+}
+
+// KubernetesUpgradeStrategy tells the operator if the user wants to
+// allocate more space while upgrading a k8s node which is hosting
+// the PostgreSQL Pods, or just wait for the node to come up
+type KubernetesUpgradeStrategy string
+
+const (
+	// KubernetesUpgradeStrategyAllocateSpace means that the operator
+	// should allocate more disk space to host data belonging to the
+	// k8s node that is being updated
+	KubernetesUpgradeStrategyAllocateSpace = "allocateSpace"
+
+	// KubernetesUpgradeStrategyWaitForNode means that the operator
+	// should just recreate the Pods and wait for the upgraded node
+	// to be ready
+	KubernetesUpgradeStrategyWaitForNode = "waitForNode"
+)
+
+// NodeMaintenanceWindow contains information that the operator
+// will use while upgrading the underlying node.
+//
+// This option is only useful when the chosen storage prevents the Pods
+// from being freely moved across nodes.
+type NodeMaintenanceWindow struct {
+	// Reuse the existing PVC (wait for the node to come
+	// up again) or not (recreate it elsewhere when `instances` > 1)
+	// +optional
+	// +kubebuilder:default:=true
+	ReusePVC *bool `json:"reusePVC,omitempty"`
+
+	// Is there a node maintenance activity in progress?
+	// +optional
+	// +kubebuilder:default:=false
+	InProgress bool `json:"inProgress,omitempty"`
+}
+
+// PrimaryUpdateStrategy contains the strategy to follow when upgrading
+// the primary server of the cluster as part of rolling updates
+type PrimaryUpdateStrategy string
+
+// PrimaryUpdateMethod contains the method to use when upgrading
+// the primary server of the cluster as part of rolling updates
+type PrimaryUpdateMethod string
+
+const (
+	// PrimaryUpdateStrategySupervised means that the operator needs to wait for the
+	// user to manually issue a switchover request before updating the primary
+	// server (`supervised`)
+	PrimaryUpdateStrategySupervised PrimaryUpdateStrategy = "supervised"
+
+	// PrimaryUpdateStrategyUnsupervised means that the operator will proceed with the
+	// selected PrimaryUpdateMethod to another updated replica and then automatically update
+	// the primary server (`unsupervised`, default)
+	PrimaryUpdateStrategyUnsupervised PrimaryUpdateStrategy = "unsupervised"
+
+	// PrimaryUpdateMethodSwitchover means that the operator will switch over to another updated
+	// replica when it needs to upgrade the primary instance
+	PrimaryUpdateMethodSwitchover PrimaryUpdateMethod = "switchover"
+
+	// PrimaryUpdateMethodRestart means that the operator will restart the primary instance in-place
+	// when it needs to upgrade it
+	PrimaryUpdateMethodRestart PrimaryUpdateMethod = "restart"
+
+	// DefaultPgCtlTimeoutForPromotion is the default for the pg_ctl timeout when a promotion is performed.
+	// It is greater than one year in seconds, big enough to simulate an infinite timeout
+	DefaultPgCtlTimeoutForPromotion = 40000000
+
+	// DefaultMaxSwitchoverDelay is the default for the pg_ctl timeout in seconds when a primary PostgreSQL instance
+	// is gracefully shut down during a switchover.
+	DefaultMaxSwitchoverDelay = 3600
+
+	// DefaultStartupDelay is the default value for startupDelay. It is used to calculate the
+	// FailureThreshold of the startupProbe with the formula
+	// `FailureThreshold = ceiling(startupDelay / periodSeconds)`; the minimum value is 1
+	DefaultStartupDelay = 3600
+)
+
+// SynchronousReplicaConfigurationMethod configures whether to use
+// quorum based replication or a priority list
+type SynchronousReplicaConfigurationMethod string
+
+const (
+	// SynchronousReplicaConfigurationMethodFirst means a priority list should be used
+	SynchronousReplicaConfigurationMethodFirst = SynchronousReplicaConfigurationMethod("first")
+
+	// SynchronousReplicaConfigurationMethodAny means that quorum based replication should be used
+	SynchronousReplicaConfigurationMethodAny = SynchronousReplicaConfigurationMethod("any")
+)
+
+// SynchronousReplicaConfiguration contains the configuration of the
+// PostgreSQL synchronous replication feature.
+// Important: at this moment, `.spec.minSyncReplicas` and `.spec.maxSyncReplicas`
+// also need to be considered.
+type SynchronousReplicaConfiguration struct {
+	// Method to select synchronous replication standbys from the listed
+	// servers, accepting 'any' (quorum-based synchronous replication) or
+	// 'first' (priority-based synchronous replication) as values.
+	// +kubebuilder:validation:Enum=any;first
+	Method SynchronousReplicaConfigurationMethod `json:"method"`
+
+	// Specifies the number of synchronous standby servers that
+	// transactions must wait for responses from.
+	// +kubebuilder:validation:XValidation:rule="self > 0",message="The number of synchronous replicas should be greater than zero"
+	Number int `json:"number"`
+
+	// Specifies the maximum number of local cluster pods that can be
+	// automatically included in the `synchronous_standby_names` option in
+	// PostgreSQL.
+	// +optional
+	MaxStandbyNamesFromCluster *int `json:"maxStandbyNamesFromCluster,omitempty"`
+
+	// A user-defined list of application names to be added to
+	// `synchronous_standby_names` before local cluster pods (the order is
+	// only useful for priority-based synchronous replication).
+	// +optional
+	StandbyNamesPre []string `json:"standbyNamesPre,omitempty"`
+
+	// A user-defined list of application names to be added to
+	// `synchronous_standby_names` after local cluster pods (the order is
+	// only useful for priority-based synchronous replication).
+	// +optional
+	StandbyNamesPost []string `json:"standbyNamesPost,omitempty"`
+}
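To make the mapping concrete, a sketch (not the operator's actual algorithm) of how this structure corresponds to the PostgreSQL `synchronous_standby_names` syntax, e.g. `ANY 2 ("pod-1", "pod-2")`; the naive quoting and the way local pods are merged with the pre/post lists are assumptions for illustration:

import (
	"fmt"
	"strings"
)

// renderSynchronousStandbyNames concatenates StandbyNamesPre, the local pods,
// and StandbyNamesPost, and renders them with the configured method and number.
func renderSynchronousStandbyNames(cfg SynchronousReplicaConfiguration, pods []string) string {
	names := append(append(append([]string{}, cfg.StandbyNamesPre...), pods...), cfg.StandbyNamesPost...)
	for i, n := range names {
		names[i] = fmt.Sprintf("%q", n) // naive quoting, for illustration only
	}
	return fmt.Sprintf("%s %d (%s)",
		strings.ToUpper(string(cfg.Method)), cfg.Number, strings.Join(names, ", "))
}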
+
+// PostgresConfiguration defines the PostgreSQL configuration
+type PostgresConfiguration struct {
+	// PostgreSQL configuration options (postgresql.conf)
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty"`
+
+	// Configuration of the PostgreSQL synchronous replication feature
+	// +optional
+	Synchronous *SynchronousReplicaConfiguration `json:"synchronous,omitempty"`
+
+	// PostgreSQL Host Based Authentication rules (lines to be appended
+	// to the pg_hba.conf file)
+	// +optional
+	PgHBA []string `json:"pg_hba,omitempty"`
+
+	// PostgreSQL User Name Maps rules (lines to be appended
+	// to the pg_ident.conf file)
+	// +optional
+	PgIdent []string `json:"pg_ident,omitempty"`
+
+	// Requirements to be met by sync replicas. This will affect how the
+	// "synchronous_standby_names" parameter will be set up.
+	// +optional
+	SyncReplicaElectionConstraint SyncReplicaElectionConstraints `json:"syncReplicaElectionConstraint,omitempty"`
+
+	// List of shared preload libraries to add to the default ones
+	// +optional
+	AdditionalLibraries []string `json:"shared_preload_libraries,omitempty"`
+
+	// Options to specify LDAP configuration
+	// +optional
+	LDAP *LDAPConfig `json:"ldap,omitempty"`
+
+	// Specifies the maximum number of seconds to wait when promoting an instance to primary.
+	// Default value is 40000000, greater than one year in seconds,
+	// big enough to simulate an infinite timeout
+	// +optional
+	PgCtlTimeoutForPromotion int32 `json:"promotionTimeout,omitempty"`
+
+	// If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+	// on this CloudNativePG Cluster.
+	// This should only be used for debugging and troubleshooting.
+	// Defaults to false.
+	// +optional
+	EnableAlterSystem bool `json:"enableAlterSystem,omitempty"`
+}
+
+// BootstrapConfiguration contains information about how to create the PostgreSQL
+// cluster. Only a single bootstrap method can be defined among the supported
+// ones. `initdb` will be used as the bootstrap method if left
+// unspecified. Refer to the Bootstrap page of the documentation for more
+// information.
+type BootstrapConfiguration struct {
+	// Bootstrap the cluster via initdb
+	// +optional
+	InitDB *BootstrapInitDB `json:"initdb,omitempty"`
+
+	// Bootstrap the cluster from a backup
+	// +optional
+	Recovery *BootstrapRecovery `json:"recovery,omitempty"`
+
+	// Bootstrap the cluster taking a physical backup of another compatible
+	// PostgreSQL instance
+	// +optional
+	PgBaseBackup *BootstrapPgBaseBackup `json:"pg_basebackup,omitempty"`
+}
+
+// LDAPScheme defines the possible schemes for LDAP
+type LDAPScheme string
+
+// These are the valid LDAP schemes
+const (
+	LDAPSchemeLDAP  LDAPScheme = "ldap"
+	LDAPSchemeLDAPS LDAPScheme = "ldaps"
+)
+
+// LDAPConfig contains the parameters needed for LDAP authentication
+type LDAPConfig struct {
+	// LDAP hostname or IP address
+	// +optional
+	Server string `json:"server,omitempty"`
+	// LDAP server port
+	// +optional
+	Port int `json:"port,omitempty"`
+
+	// LDAP scheme to be used, possible options are `ldap` and `ldaps`
+	// +kubebuilder:validation:Enum=ldap;ldaps
+	// +optional
+	Scheme LDAPScheme `json:"scheme,omitempty"`
+
+	// Bind as authentication configuration
+	// +optional
+	BindAsAuth *LDAPBindAsAuth `json:"bindAsAuth,omitempty"`
+
+	// Bind+Search authentication configuration
+	// +optional
+	BindSearchAuth *LDAPBindSearchAuth `json:"bindSearchAuth,omitempty"`
+
+	// Set to 'true' to enable LDAP over TLS. Defaults to 'false'.
+	// +optional
+	TLS bool `json:"tls,omitempty"`
+}
+
+// LDAPBindAsAuth provides the required fields to use the
+// bind authentication for LDAP
+type LDAPBindAsAuth struct {
+	// Prefix for the bind authentication option
+	// +optional
+	Prefix string `json:"prefix,omitempty"`
+	// Suffix for the bind authentication option
+	// +optional
+	Suffix string `json:"suffix,omitempty"`
+}
+
+// LDAPBindSearchAuth provides the required fields to use
+// the bind+search LDAP authentication process
+type LDAPBindSearchAuth struct {
+	// Root DN to begin the user search
+	// +optional
+	BaseDN string `json:"baseDN,omitempty"`
+	// DN of the user to bind to the directory
+	// +optional
+	BindDN string `json:"bindDN,omitempty"`
+	// Secret with the password for the user to bind to the directory
+	// +optional
+	BindPassword *corev1.SecretKeySelector `json:"bindPassword,omitempty"`
+
+	// Attribute to match against the username
+	// +optional
+	SearchAttribute string `json:"searchAttribute,omitempty"`
+	// Search filter to use when doing the search+bind authentication
+	// +optional
+	SearchFilter string `json:"searchFilter,omitempty"`
+}
+
+// CertificatesConfiguration contains the needed configurations to handle server certificates.
+type CertificatesConfiguration struct {
+	// The secret containing the Server CA certificate. If not defined, a new secret will be created
+	// with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+ //
+ // Contains:
+ //
+	// - `ca.crt`: CA that should be used to validate the server certificate,
+	// used as `sslrootcert` in client connection strings.
+	// - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+	// this can be omitted.
+	// +optional
+	ServerCASecret string `json:"serverCASecret,omitempty"`
+
+	// The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+	// `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+	// If not defined, ServerCASecret must also provide `ca.key` and a new secret will be
+	// created using the provided CA.
+	// +optional
+	ServerTLSSecret string `json:"serverTLSSecret,omitempty"`
+
+	// The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+	// the `streaming_replica` user.
+	// If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+	// created using the provided CA.
+	// +optional
+	ReplicationTLSSecret string `json:"replicationTLSSecret,omitempty"`
+
+	// The secret containing the Client CA certificate. If not defined, a new secret will be created
+	// with a self-signed CA and will be used to generate all the client certificates.
+ //
+ // Contains:
+ //
+	// - `ca.crt`: CA that should be used to validate the client certificates,
+	// used as `ssl_ca_file` of all the instances.
+	// - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+	// this can be omitted.
+	// +optional
+	ClientCASecret string `json:"clientCASecret,omitempty"`
+
+	// The list of the server alternative DNS names to be added to the generated server TLS certificates, when required.
+	// +optional
+	ServerAltDNSNames []string `json:"serverAltDNSNames,omitempty"`
+}
+
+// CertificatesStatus contains configuration certificates and related expiration dates.
+type CertificatesStatus struct {
+	// Needed configurations to handle server certificates, initialized with default values, if needed.
+	CertificatesConfiguration `json:",inline"`
+
+	// Expiration dates for all certificates.
+	// +optional
+	Expirations map[string]string `json:"expirations,omitempty"`
+}
+
+// BootstrapInitDB is the configuration of the bootstrap process when
+// initdb is used.
+// Refer to the Bootstrap page of the documentation for more information.
+type BootstrapInitDB struct {
+	// Name of the database used by the application. Default: `app`.
+	// +optional
+	Database string `json:"database,omitempty"`
+
+	// Name of the owner of the database in the instance to be used
+	// by applications. Defaults to the value of the `database` key.
+	// +optional
+	Owner string `json:"owner,omitempty"`
+
+	// Name of the secret containing the initial credentials for the
+	// owner of the user database. If empty, a new secret will be
+	// created from scratch
+	// +optional
+	Secret *LocalObjectReference `json:"secret,omitempty"`
+
+	// The list of options that must be passed to initdb when creating the cluster.
+	// Deprecated: This could lead to inconsistent configurations;
+	// please use the explicitly provided parameters instead.
+	// If defined, explicit values will be ignored.
+	// +optional
+	Options []string `json:"options,omitempty"`
+
+	// Whether the `-k` option should be passed to initdb,
+	// enabling checksums on data pages (default: `false`)
+	// +optional
+	DataChecksums *bool `json:"dataChecksums,omitempty"`
+
+	// The value to be passed as option `--encoding` for initdb (default: `UTF8`)
+	// +optional
+	Encoding string `json:"encoding,omitempty"`
+
+	// The value to be passed as option `--lc-collate` for initdb (default: `C`)
+	// +optional
+	LocaleCollate string `json:"localeCollate,omitempty"`
+
+	// The value to be passed as option `--lc-ctype` for initdb (default: `C`)
+	// +optional
+	LocaleCType string `json:"localeCType,omitempty"`
+
+	// The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
+	// option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=1024
+	// +optional
+	WalSegmentSize int `json:"walSegmentSize,omitempty"`
+
+	// List of SQL queries to be executed as a superuser in the `postgres`
+	// database right after the cluster has been created - to be used with extreme care
+	// (by default empty)
+	// +optional
+	PostInitSQL []string `json:"postInitSQL,omitempty"`
+
+	// List of SQL queries to be executed as a superuser in the application
+	// database right after the cluster has been created - to be used with extreme care
+	// (by default empty)
+	// +optional
+	PostInitApplicationSQL []string `json:"postInitApplicationSQL,omitempty"`
+
+	// List of SQL queries to be executed as a superuser in the `template1`
+	// database right after the cluster has been created - to be used with extreme care
+	// (by default empty)
+	// +optional
+	PostInitTemplateSQL []string `json:"postInitTemplateSQL,omitempty"`
+
+	// Bootstraps the new cluster by importing data from an existing PostgreSQL
+	// instance using logical backup (`pg_dump` and `pg_restore`)
+	// +optional
+	Import *Import `json:"import,omitempty"`
+
+	// List of references to ConfigMaps or Secrets containing SQL files
+	// to be executed as a superuser in the application database right after
+	// the cluster has been created. The references are processed in a specific order:
+	// first, all Secrets are processed, followed by all ConfigMaps.
+	// Within each group, the processing order follows the sequence specified
+	// in their respective arrays.
+	// (by default empty)
+	// +optional
+	PostInitApplicationSQLRefs *SQLRefs `json:"postInitApplicationSQLRefs,omitempty"`
+
+	// List of references to ConfigMaps or Secrets containing SQL files
+	// to be executed as a superuser in the `template1` database right after
+	// the cluster has been created. The references are processed in a specific order:
+	// first, all Secrets are processed, followed by all ConfigMaps.
+	// Within each group, the processing order follows the sequence specified
+	// in their respective arrays.
+	// (by default empty)
+	// +optional
+	PostInitTemplateSQLRefs *SQLRefs `json:"postInitTemplateSQLRefs,omitempty"`
+
+	// List of references to ConfigMaps or Secrets containing SQL files
+	// to be executed as a superuser in the `postgres` database right after
+	// the cluster has been created. The references are processed in a specific order:
+	// first, all Secrets are processed, followed by all ConfigMaps.
+	// Within each group, the processing order follows the sequence specified
+	// in their respective arrays.
+	// (by default empty)
+	// +optional
+	PostInitSQLRefs *SQLRefs `json:"postInitSQLRefs,omitempty"`
+}
+
+// SnapshotType is a type of allowed import
+type SnapshotType string
+
+const (
+	// MonolithSnapshotType indicates to execute the monolith clone typology
+	MonolithSnapshotType SnapshotType = "monolith"
+
+	// MicroserviceSnapshotType indicates to execute the microservice clone typology
+	MicroserviceSnapshotType SnapshotType = "microservice"
+)
+
+// Import contains the configuration to initialize a database from a logical
+// snapshot of an externalCluster
+type Import struct {
+	// The source of the import
+	Source ImportSource `json:"source"`
+
+	// The import type. Can be `microservice` or `monolith`.
+	// +kubebuilder:validation:Enum=microservice;monolith
+	Type SnapshotType `json:"type"`
+
+	// The databases to import
+	Databases []string `json:"databases"`
+
+	// The roles to import
+	// +optional
+	Roles []string `json:"roles,omitempty"`
+
+	// List of SQL queries to be executed as a superuser in the application
+	// database right after it is imported - to be used with extreme care
+	// (by default empty). Only available in microservice type.
+	// +optional
+	PostImportApplicationSQL []string `json:"postImportApplicationSQL,omitempty"`
+
+	// When set to true, only the `pre-data` and `post-data` sections of
+	// `pg_restore` are invoked, avoiding data import. Default: `false`.
+	// +optional
+	SchemaOnly bool `json:"schemaOnly,omitempty"`
+}
+
+// ImportSource describes the source for the logical snapshot
+type ImportSource struct {
+	// The name of the externalCluster used for import
+	ExternalCluster string `json:"externalCluster"`
+}
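The processing order documented for the `PostInit*SQLRefs` fields (Secrets first, then ConfigMaps, each group in array order) can be sketched against the `SQLRefs` type defined just below; the `Name` field is assumed to be available through the embedded `LocalObjectReference`, as elsewhere in this API:

// orderedRefs lists the references of an SQLRefs value in the documented
// processing order: all Secrets first, then all ConfigMaps.
func orderedRefs(refs SQLRefs) []string {
	names := make([]string, 0, len(refs.SecretRefs)+len(refs.ConfigMapRefs))
	for _, s := range refs.SecretRefs {
		names = append(names, "secret/"+s.Name)
	}
	for _, c := range refs.ConfigMapRefs {
		names = append(names, "configmap/"+c.Name)
	}
	return names
}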
+
+// SQLRefs holds references to ConfigMaps or Secrets
+// containing SQL files. The references are processed in a specific order:
+// first, all Secrets are processed, followed by all ConfigMaps.
+// Within each group, the processing order follows the sequence specified
+// in their respective arrays.
+type SQLRefs struct {
+	// SecretRefs holds a list of references to Secrets
+	// +optional
+	SecretRefs []SecretKeySelector `json:"secretRefs,omitempty"`
+
+	// ConfigMapRefs holds a list of references to ConfigMaps
+	// +optional
+	ConfigMapRefs []ConfigMapKeySelector `json:"configMapRefs,omitempty"`
+}
+
+// BootstrapRecovery contains the configuration required to restore
+// from an existing cluster using three methodologies: external cluster,
+// volume snapshots, or backup objects. Full recovery and Point-In-Time
+// Recovery are supported.
+// The method can also be used to create clusters in continuous recovery
+// (replica clusters), also supporting cascading replication when `instances` >
+// 1. Once the cluster exits recovery, the password for the superuser
+// will be changed through the provided secret.
+// Refer to the Bootstrap page of the documentation for more information.
+type BootstrapRecovery struct {
+	// The backup object containing the physical base backup from which to
+	// initiate the recovery procedure.
+	// Mutually exclusive with `source` and `volumeSnapshots`.
+	// +optional
+	Backup *BackupSource `json:"backup,omitempty"`
+
+	// The external cluster whose backup we will restore. This is also
+	// used as the name of the folder under which the backup is stored,
+	// so it must be set to the name of the source cluster.
+	// Mutually exclusive with `backup`.
+	// +optional
+	Source string `json:"source,omitempty"`
+
+	// The static PVC data source(s) from which to initiate the
+	// recovery procedure. Currently supporting `VolumeSnapshot`
+	// and `PersistentVolumeClaim` resources that map an existing
+	// PVC group, compatible with CloudNativePG, and taken with
+	// a cold backup copy on a fenced Postgres instance (limitation
+	// which will be removed in the future when online backup
+	// will be implemented).
+	// Mutually exclusive with `backup`.
+	// +optional
+	VolumeSnapshots *DataSource `json:"volumeSnapshots,omitempty"`
+
+	// By default, the recovery process applies all the available
+	// WAL files in the archive (full recovery). However, you can also
+	// end the recovery as soon as a consistent state is reached or
+	// recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+	// as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+	// More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+	// +optional
+	RecoveryTarget *RecoveryTarget `json:"recoveryTarget,omitempty"`
+
+	// Name of the database used by the application. Default: `app`.
+	// +optional
+	Database string `json:"database,omitempty"`
+
+	// Name of the owner of the database in the instance to be used
+	// by applications. Defaults to the value of the `database` key.
+	// +optional
+	Owner string `json:"owner,omitempty"`
+
+	// Name of the secret containing the initial credentials for the
+	// owner of the user database.
+	// If empty, a new secret will be created from scratch
+	// +optional
+	Secret *LocalObjectReference `json:"secret,omitempty"`
+}
+
+// DataSource contains the configuration required to bootstrap a
+// PostgreSQL cluster from an existing storage
+type DataSource struct {
+	// Configuration of the storage of the instances
+	Storage corev1.TypedLocalObjectReference `json:"storage"`
+
+	// Configuration of the storage for PostgreSQL WAL (Write-Ahead Log)
+	// +optional
+	WalStorage *corev1.TypedLocalObjectReference `json:"walStorage,omitempty"`
+
+	// Configuration of the storage for PostgreSQL tablespaces
+	// +optional
+	TablespaceStorage map[string]corev1.TypedLocalObjectReference `json:"tablespaceStorage,omitempty"`
+}
+
+// BackupSource contains the backup we need to restore from, plus some
+// information that could be needed to correctly restore it.
+type BackupSource struct {
+	LocalObjectReference `json:",inline"`
+	// EndpointCA stores the CA bundle of the barman endpoint.
+	// Useful when using self-signed certificates to avoid
+	// errors with certificate issuer and barman-cloud-wal-archive.
+	// +optional
+	EndpointCA *SecretKeySelector `json:"endpointCA,omitempty"`
+}
+
+// BootstrapPgBaseBackup contains the configuration required to take
+// a physical backup of an existing PostgreSQL cluster
+type BootstrapPgBaseBackup struct {
+	// The name of the server of which we need to take a physical backup
+	// +kubebuilder:validation:MinLength=1
+	Source string `json:"source"`
+
+	// Name of the database used by the application. Default: `app`.
+	// +optional
+	Database string `json:"database,omitempty"`
+
+	// Name of the owner of the database in the instance to be used
+	// by applications. Defaults to the value of the `database` key.
+	// +optional
+	Owner string `json:"owner,omitempty"`
+
+	// Name of the secret containing the initial credentials for the
+	// owner of the user database. If empty, a new secret will be
+	// created from scratch
+	// +optional
+	Secret *LocalObjectReference `json:"secret,omitempty"`
+}
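As a usage note for the `RecoveryTarget` type defined just below, a point-in-time target pairs a timestamp with an optional timeline; the timestamp value here is only an example:

// examplePITRTarget builds a PITR target aimed at a specific RFC3339
// timestamp on the latest timeline. Remember that all target options except
// TargetTLI are mutually exclusive.
func examplePITRTarget() RecoveryTarget {
	return RecoveryTarget{
		TargetTime: "2024-10-03T09:00:46Z", // example timestamp
		TargetTLI:  "latest",
	}
}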
+
+// RecoveryTarget allows configuring the point at which the recovery process
+// will stop. All the target options except TargetTLI are mutually exclusive.
+type RecoveryTarget struct {
+	// The ID of the backup from which to start the recovery process.
+	// If empty (default), the operator will automatically detect the backup
+	// based on targetTime or targetLSN if specified. Otherwise, it uses the
+	// latest available backup in chronological order.
+	// +optional
+	BackupID string `json:"backupID,omitempty"`
+
+	// The target timeline ("latest" or a positive integer)
+	// +optional
+	TargetTLI string `json:"targetTLI,omitempty"`
+
+	// The target transaction ID
+	// +optional
+	TargetXID string `json:"targetXID,omitempty"`
+
+	// The target name (to be previously created
+	// with `pg_create_restore_point`)
+	// +optional
+	TargetName string `json:"targetName,omitempty"`
+
+	// The target LSN (Log Sequence Number)
+	// +optional
+	TargetLSN string `json:"targetLSN,omitempty"`
+
+	// The target time as a timestamp in the RFC3339 standard
+	// +optional
+	TargetTime string `json:"targetTime,omitempty"`
+
+	// End recovery as soon as a consistent state is reached
+	// +optional
+	TargetImmediate *bool `json:"targetImmediate,omitempty"`
+
+	// Set the target to be exclusive. If omitted, defaults to false, so that
+	// in Postgres, `recovery_target_inclusive` will be true
+	// +optional
+	Exclusive *bool `json:"exclusive,omitempty"`
+}
+
+// StorageConfiguration is the configuration used to create and reconcile PVCs,
+// usable for WAL volumes, PGDATA volumes, or tablespaces
+type StorageConfiguration struct {
+	// StorageClass to use for PVCs. Applied after
+	// evaluating the PVC template, if available.
+	// If not specified, the generated PVCs will use the
+	// default storage class
+	// +optional
+	StorageClass *string `json:"storageClass,omitempty"`
+
+	// Size of the storage. Required if not already specified in the PVC template.
+	// Changes to this field are automatically reapplied to the created PVCs.
+	// Size cannot be decreased.
+	// +optional
+	Size string `json:"size,omitempty"`
+
+	// Resize existing PVCs, defaults to true
+	// +optional
+	// +kubebuilder:default:=true
+	ResizeInUseVolumes *bool `json:"resizeInUseVolumes,omitempty"`
+
+	// Template to be used to generate the Persistent Volume Claim
+	// +optional
+	PersistentVolumeClaimTemplate *corev1.PersistentVolumeClaimSpec `json:"pvcTemplate,omitempty"`
+}
+
+// TablespaceConfiguration is the configuration of a tablespace, and includes
+// the storage specification for the tablespace
+type TablespaceConfiguration struct {
+	// The name of the tablespace
+	Name string `json:"name"`
+
+	// The storage configuration for the tablespace
+	Storage StorageConfiguration `json:"storage"`
+
+	// Owner is the PostgreSQL user owning the tablespace
+	// +optional
+	Owner DatabaseRoleRef `json:"owner,omitempty"`
+
+	// When set to true, the tablespace will be added as a `temp_tablespaces`
+	// entry in PostgreSQL, and will be available to automatically house temp
+	// database objects, or other temporary files. Please refer to PostgreSQL
+	// documentation for more information on the `temp_tablespaces` GUC.
+	// +optional
+	// +kubebuilder:default:=false
+	Temporary bool `json:"temporary,omitempty"`
+}
+
+// DatabaseRoleRef is a reference to a role available inside PostgreSQL
+type DatabaseRoleRef struct {
+	// +optional
+	Name string `json:"name,omitempty"`
+}
+
+// SyncReplicaElectionConstraints contains the constraints for sync replicas election.
+//
+// For anti-affinity parameters, two instances are considered in the same location
+// if all the label values match.
+//
+// In the future, synchronous replica election restriction by name will be supported.
+type SyncReplicaElectionConstraints struct {
+	// A list of node label values to extract and compare to evaluate if the pods reside in the same topology or not
+	// +optional
+	NodeLabelsAntiAffinity []string `json:"nodeLabelsAntiAffinity,omitempty"`
+
+	// This flag enables the constraints for sync replicas
+	Enabled bool `json:"enabled"`
+}
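The "same location" rule described above (all listed node label values must match) can be sketched as follows:

// sameLocation reports whether two instances, described by their node labels,
// reside in the same location for anti-affinity purposes (illustrative only).
func sameLocation(constraints SyncReplicaElectionConstraints, a, b map[string]string) bool {
	for _, label := range constraints.NodeLabelsAntiAffinity {
		if a[label] != b[label] {
			return false
		}
	}
	return true
}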
+
+// AffinityConfiguration contains the info we need to create the
+// affinity rules for Pods
+type AffinityConfiguration struct {
+	// Activates anti-affinity for the pods. The operator will define pods
+	// anti-affinity unless this field is explicitly set to false
+	// +optional
+	EnablePodAntiAffinity *bool `json:"enablePodAntiAffinity,omitempty"`
+
+	// TopologyKey to use for anti-affinity configuration. See k8s documentation
+	// for more info on that
+	// +optional
+	TopologyKey string `json:"topologyKey,omitempty"`
+
+	// NodeSelector is a map of key-value pairs used to define the nodes on which
+	// the pods can run.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// NodeAffinity describes node affinity scheduling rules for the pod.
+	// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+	// +optional
+	NodeAffinity *corev1.NodeAffinity `json:"nodeAffinity,omitempty"`
+
+	// Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+	// on tainted nodes.
+	// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+	// considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+	// "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+	// added if all the existing nodes don't match the required pod anti-affinity rule.
+	// More info:
+	// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+	// +optional
+	PodAntiAffinityType string `json:"podAntiAffinityType,omitempty"`
+
+	// AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated
+	// by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+	// +optional
+	AdditionalPodAntiAffinity *corev1.PodAntiAffinity `json:"additionalPodAntiAffinity,omitempty"`
+
+	// AdditionalPodAffinity allows specifying pod affinity terms to be passed to all the cluster's pods.
+	// +optional
+	AdditionalPodAffinity *corev1.PodAffinity `json:"additionalPodAffinity,omitempty"`
+}
+
+// RollingUpdateStatus contains the information about an instance which is
+// being updated
+type RollingUpdateStatus struct {
+	// The image which we put into the Pod
+	ImageName string `json:"imageName"`
+
+	// When the update has been started
+	// +optional
+	StartedAt metav1.Time `json:"startedAt,omitempty"`
+}
+
+// BackupTarget describes the preferred targets for a backup
+type BackupTarget string
+
+const (
+	// BackupTargetPrimary means backups will be performed on the primary instance
+	BackupTargetPrimary = BackupTarget("primary")
+
+	// BackupTargetStandby means backups will be performed on a standby instance if available
+	BackupTargetStandby = BackupTarget("prefer-standby")
+
+	// DefaultBackupTarget is the default BackupTarget
+	DefaultBackupTarget = BackupTargetStandby
+)
+
+// BackupConfiguration defines how the backups of the cluster are taken.
+// The supported backup methods are BarmanObjectStore and VolumeSnapshot.
+// For details and examples refer to the Backup and Recovery section of the
+// documentation
+type BackupConfiguration struct {
+	// VolumeSnapshot provides the configuration for the execution of volume snapshot backups.
+	// +optional
+	VolumeSnapshot *VolumeSnapshotConfiguration `json:"volumeSnapshot,omitempty"`
+
+	// The configuration for the barman-cloud tool suite
+	// +optional
+	BarmanObjectStore *BarmanObjectStoreConfiguration `json:"barmanObjectStore,omitempty"`
+
+	// RetentionPolicy is the retention policy to be used for backups
+	// and WALs (i.e. '60d').
+	// The retention policy is expressed in the form
+	// of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+	// days, weeks, months.
+	// It's currently only applicable when using the BarmanObjectStore method.
+	// +kubebuilder:validation:Pattern=^[1-9][0-9]*[dwm]$
+	// +optional
+	RetentionPolicy string `json:"retentionPolicy,omitempty"`
+
+	// The policy to decide which instance should perform backups. Available
+	// options are empty string, which will default to the `prefer-standby` policy,
+	// `primary` to have backups always run on primary instances, and `prefer-standby`
+	// to have backups run preferably on the most updated standby, if available.
+	// +kubebuilder:validation:Enum=primary;prefer-standby
+	// +kubebuilder:default:=prefer-standby
+	// +optional
+	Target BackupTarget `json:"target,omitempty"`
+}
+
+// MonitoringConfiguration is the type containing all the monitoring
+// configuration for a certain cluster
+type MonitoringConfiguration struct {
+	// Whether the default queries should be injected.
+	// Set it to `true` if you don't want to inject default queries into the cluster.
+	// Default: false.
+	// +kubebuilder:default:=false
+	// +optional
+	DisableDefaultQueries *bool `json:"disableDefaultQueries,omitempty"`
+
+	// The list of config maps containing the custom queries
+	// +optional
+	CustomQueriesConfigMap []ConfigMapKeySelector `json:"customQueriesConfigMap,omitempty"`
+
+	// The list of secrets containing the custom queries
+	// +optional
+	CustomQueriesSecret []SecretKeySelector `json:"customQueriesSecret,omitempty"`
+
+	// Enable or disable the `PodMonitor`
+	// +kubebuilder:default:=false
+	// +optional
+	EnablePodMonitor bool `json:"enablePodMonitor,omitempty"`
+
+	// Configure TLS communication for the metrics endpoint.
+	// Changing the tls.enabled option will force a rollout of all instances.
+	// +optional
+	TLSConfig *ClusterMonitoringTLSConfiguration `json:"tls,omitempty"`
+
+	// The list of metric relabelings for the `PodMonitor`. Applied to samples before ingestion.
+	// +optional
+	PodMonitorMetricRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorMetricRelabelings,omitempty"`
+
+	// The list of relabelings for the `PodMonitor`. Applied to samples before scraping.
+	// +optional
+	PodMonitorRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorRelabelings,omitempty"`
+}
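The `RetentionPolicy` format documented above can be checked with the same pattern used in its kubebuilder validation:

import "regexp"

// retentionPolicyRE mirrors the kubebuilder pattern on RetentionPolicy:
// a positive integer followed by d (days), w (weeks), or m (months).
var retentionPolicyRE = regexp.MustCompile(`^[1-9][0-9]*[dwm]$`)

// isValidRetentionPolicy accepts values such as "60d", "12w", or "6m".
func isValidRetentionPolicy(policy string) bool {
	return retentionPolicyRE.MatchString(policy)
}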
+
+// ClusterMonitoringTLSConfiguration is the type containing the TLS configuration
+// for the cluster's monitoring
+type ClusterMonitoringTLSConfiguration struct {
+	// Enable TLS for the monitoring endpoint.
+	// Changing this option will force a rollout of all instances.
+	// +kubebuilder:default:=false
+	// +optional
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+// ExternalCluster represents the connection parameters to an
+// external cluster which is used in the other sections of the configuration
+type ExternalCluster struct {
+	// The server name, required
+	Name string `json:"name"`
+
+	// The list of connection parameters, such as dbname, host, username, etc
+	// +optional
+	ConnectionParameters map[string]string `json:"connectionParameters,omitempty"`
+
+	// The reference to an SSL certificate to be used to connect to this
+	// instance
+	// +optional
+	SSLCert *corev1.SecretKeySelector `json:"sslCert,omitempty"`
+
+	// The reference to an SSL private key to be used to connect to this
+	// instance
+	// +optional
+	SSLKey *corev1.SecretKeySelector `json:"sslKey,omitempty"`
+
+	// The reference to an SSL CA public key to be used to connect to this
+	// instance
+	// +optional
+	SSLRootCert *corev1.SecretKeySelector `json:"sslRootCert,omitempty"`
+
+	// The reference to the password to be used to connect to the server.
+	// If a password is provided, CloudNativePG creates a PostgreSQL
+	// passfile at `/controller/external/NAME/pass` (where "NAME" is the
+	// cluster's name). This passfile is automatically referenced in the
+	// connection string when establishing a connection to the remote
+	// PostgreSQL server from the current PostgreSQL `Cluster`. This ensures
+	// secure and efficient password management for external clusters.
+	// +optional
+	Password *corev1.SecretKeySelector `json:"password,omitempty"`
+
+	// The configuration for the barman-cloud tool suite
+	// +optional
+	BarmanObjectStore *BarmanObjectStoreConfiguration `json:"barmanObjectStore,omitempty"`
+}
+
+// EnsureOption represents whether we should enforce the presence or absence of
+// a Role in a PostgreSQL instance
+type EnsureOption string
+
+// values taken by EnsureOption
+const (
+	EnsurePresent EnsureOption = "present"
+	EnsureAbsent  EnsureOption = "absent"
+)
+
+// ServiceSelectorType describes a valid value for generating the service selectors.
+// It indicates which type of service the selector applies to, such as read-write, read, or read-only
+// +kubebuilder:validation:Enum=rw;r;ro
+type ServiceSelectorType string
+
+// Constants representing the valid values for ServiceSelectorType.
+const (
+	// ServiceSelectorTypeRW selects the read-write service.
+	ServiceSelectorTypeRW ServiceSelectorType = "rw"
+	// ServiceSelectorTypeR selects the read service.
+	ServiceSelectorTypeR ServiceSelectorType = "r"
+	// ServiceSelectorTypeRO selects the read-only service.
+	ServiceSelectorTypeRO ServiceSelectorType = "ro"
+)
+
+// ServiceUpdateStrategy describes how the changes to the managed service should be handled
+// +kubebuilder:validation:Enum=patch;replace
+type ServiceUpdateStrategy string
+
+const (
+	// ServiceUpdateStrategyPatch applies a patch derived from the differences between the actual service and the expected one
+	ServiceUpdateStrategyPatch = "patch"
+	// ServiceUpdateStrategyReplace deletes the existing service and recreates it when a difference is detected
+	ServiceUpdateStrategyReplace = "replace"
+)
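A sketch of how the passfile documented on `ExternalCluster.Password` could be referenced when connecting to an external cluster; only the passfile path comes from the field documentation, while the rest of the parameter layout is an assumption for illustration:

import "fmt"

// externalConnString builds an illustrative libpq-style connection string
// pointing at the passfile created for an external cluster.
func externalConnString(clusterName, host, dbname string) string {
	return fmt.Sprintf("host=%s dbname=%s passfile=/controller/external/%s/pass",
		host, dbname, clusterName)
}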
+
+// ManagedServices represents the services managed by the cluster.
+type ManagedServices struct {
+	// DisabledDefaultServices is a list of service types that are disabled by default.
+	// Valid values are "r" and "ro", representing the read and read-only services.
+	// +optional
+	DisabledDefaultServices []ServiceSelectorType `json:"disabledDefaultServices,omitempty"`
+	// Additional is a list of additional managed services specified by the user.
+	Additional []ManagedService `json:"additional,omitempty"`
+}
+
+// ManagedService represents a specific service managed by the cluster.
+// It includes the type of service and its associated template specification.
+type ManagedService struct {
+	// SelectorType specifies the type of selectors that the service will have.
+	// Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+	// +kubebuilder:validation:Enum=rw;r;ro
+	SelectorType ServiceSelectorType `json:"selectorType"`
+
+	// UpdateStrategy describes how the service differences should be reconciled
+	// +kubebuilder:default:="patch"
+	UpdateStrategy ServiceUpdateStrategy `json:"updateStrategy,omitempty"`
+
+	// ServiceTemplate is the template specification for the service.
+	ServiceTemplate ServiceTemplateSpec `json:"serviceTemplate"`
+}
+
+// ManagedConfiguration represents the portions of PostgreSQL that are managed
+// by the instance manager
+type ManagedConfiguration struct {
+	// Database roles managed by the `Cluster`
+	// +optional
+	Roles []RoleConfiguration `json:"roles,omitempty"`
+	// Services managed by the `Cluster`
+	// +optional
+	Services *ManagedServices `json:"services,omitempty"`
+}
+
+// PluginConfiguration specifies a plugin that needs to be loaded for this
+// cluster to be reconciled
+type PluginConfiguration struct {
+	// Name is the plugin name
+	Name string `json:"name"`
+
+	// Enabled is true if this plugin will be used
+	// +kubebuilder:default:=true
+	// +optional
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Parameters is the configuration of the plugin
+	Parameters map[string]string `json:"parameters,omitempty"`
+}
+
+// PluginStatus is the status of a loaded plugin
+type PluginStatus struct {
+	// Name is the name of the plugin
+	Name string `json:"name"`
+
+	// Version is the version of the plugin loaded by the
+	// latest reconciliation loop
+	Version string `json:"version"`
+
+	// Capabilities are the list of capabilities of the
+	// plugin
+	Capabilities []string `json:"capabilities,omitempty"`
+
+	// OperatorCapabilities are the list of capabilities of the
+	// plugin regarding the reconciler
+	OperatorCapabilities []string `json:"operatorCapabilities,omitempty"`
+
+	// WALCapabilities are the list of capabilities of the
+	// plugin regarding the WAL management
+	WALCapabilities []string `json:"walCapabilities,omitempty"`
+
+	// BackupCapabilities are the list of capabilities of the
+	// plugin regarding the Backup management
+	BackupCapabilities []string `json:"backupCapabilities,omitempty"`
+
+	// Status contains the status reported by the plugin through the SetStatusInCluster interface
+	Status string `json:"status,omitempty"`
+}
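A minimal sketch of using `ManagedServices` (defined above) to disable the default read and read-only services, as permitted by `DisabledDefaultServices`:

// exampleManagedServices disables the default "r" and "ro" services.
func exampleManagedServices() ManagedServices {
	return ManagedServices{
		DisabledDefaultServices: []ServiceSelectorType{
			ServiceSelectorTypeR,
			ServiceSelectorTypeRO,
		},
	}
}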
+kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure,omitempty"` + + // Secret containing the password of the role (if present) + // If null, the password will be ignored unless DisablePassword is set + // +optional + PasswordSecret *LocalObjectReference `json:"passwordSecret,omitempty"` + + // If the role can log in, this specifies how many concurrent + // connections the role can make. `-1` (the default) means no limit. + // +kubebuilder:default:=-1 + // +optional + ConnectionLimit int64 `json:"connectionLimit,omitempty"` + + // Date and time after which the role's password is no longer valid. + // When omitted, the password will never expire (default). + // +optional + ValidUntil *metav1.Time `json:"validUntil,omitempty"` + + // List of one or more existing roles to which this role will be + // immediately added as a new member. Default empty. + // +optional + InRoles []string `json:"inRoles,omitempty"` + + // Whether a role "inherits" the privileges of roles it is a member of. + // Defaults is `true`. + // +kubebuilder:default:=true + // +optional + Inherit *bool `json:"inherit,omitempty"` // IMPORTANT default is INHERIT + + // DisablePassword indicates that a role's password should be set to NULL in Postgres + // +optional + DisablePassword bool `json:"disablePassword,omitempty"` + + // Whether the role is a `superuser` who can override all access + // restrictions within the database - superuser status is dangerous and + // should be used only when really needed. You must yourself be a + // superuser to create a new superuser. Defaults is `false`. + // +optional + Superuser bool `json:"superuser,omitempty"` + + // When set to `true`, the role being defined will be allowed to create + // new databases. Specifying `false` (default) will deny a role the + // ability to create databases. + // +optional + CreateDB bool `json:"createdb,omitempty"` + + // Whether the role will be permitted to create, alter, drop, comment + // on, change the security label for, and grant or revoke membership in + // other roles. Default is `false`. + // +optional + CreateRole bool `json:"createrole,omitempty"` + + // Whether the role is allowed to log in. A role having the `login` + // attribute can be thought of as a user. Roles without this attribute + // are useful for managing database privileges, but are not users in + // the usual sense of the word. Default is `false`. + // +optional + Login bool `json:"login,omitempty"` + + // Whether a role is a replication role. A role must have this + // attribute (or be a superuser) in order to be able to connect to the + // server in replication mode (physical or logical replication) and in + // order to be able to create or drop replication slots. A role having + // the `replication` attribute is a very highly privileged role, and + // should only be used on roles actually used for replication. Default + // is `false`. + // +optional + Replication bool `json:"replication,omitempty"` + + // Whether a role bypasses every row-level security (RLS) policy. + // Default is `false`. 
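+	// This corresponds to the `BYPASSRLS` attribute of `CREATE ROLE` in
+	// PostgreSQL (see the CREATE ROLE reference linked above).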
+	// +optional
+	BypassRLS bool `json:"bypassrls,omitempty"` // Row-Level Security
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.instances,statuspath=.status.instances
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Instances",type="integer",JSONPath=".status.instances",description="Number of instances"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyInstances",description="Number of ready instances"
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Cluster current status"
+// +kubebuilder:printcolumn:name="Primary",type="string",JSONPath=".status.currentPrimary",description="Primary pod"
+
+// Cluster is the Schema for the PostgreSQL API
+type Cluster struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// Specification of the desired behavior of the cluster.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ClusterSpec `json:"spec"`
+	// Most recently observed status of the cluster. This data may not be up
+	// to date. Populated by the system. Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterList contains a list of Cluster
+type ClusterList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of clusters
+	Items []Cluster `json:"items"`
+}
+
+// SecretsResourceVersion contains the resource versions of the secrets
+// managed by the operator
+type SecretsResourceVersion struct {
+	// The resource version of the "postgres" user secret
+	// +optional
+	SuperuserSecretVersion string `json:"superuserSecretVersion,omitempty"`
+
+	// The resource version of the "streaming_replica" user secret
+	// +optional
+	ReplicationSecretVersion string `json:"replicationSecretVersion,omitempty"`
+
+	// The resource version of the "app" user secret
+	// +optional
+	ApplicationSecretVersion string `json:"applicationSecretVersion,omitempty"`
+
+	// The resource versions of the managed roles secrets
+	// +optional
+	ManagedRoleSecretVersions map[string]string `json:"managedRoleSecretVersion,omitempty"`
+
+	// Unused. Retained for compatibility with old versions.
+	// +optional
+	CASecretVersion string `json:"caSecretVersion,omitempty"`
+
+	// The resource version of the PostgreSQL client-side CA secret
+	// +optional
+	ClientCASecretVersion string `json:"clientCaSecretVersion,omitempty"`
+
+	// The resource version of the PostgreSQL server-side CA secret
+	// +optional
+	ServerCASecretVersion string `json:"serverCaSecretVersion,omitempty"`
+
+	// The resource version of the PostgreSQL server-side secret
+	// +optional
+	ServerSecretVersion string `json:"serverSecretVersion,omitempty"`
+
+	// The resource version of the Barman Endpoint CA if provided
+	// +optional
+	BarmanEndpointCA string `json:"barmanEndpointCA,omitempty"`
+
+	// The resource versions of the external cluster secrets
+	// +optional
+	ExternalClusterSecretVersions map[string]string `json:"externalClusterSecretVersion,omitempty"`
+
+	// A map with the versions of all the secrets used to pass metrics.
+	// Map keys are the secret names, map values are the versions
+	// +optional
+	Metrics map[string]string `json:"metrics,omitempty"`
+}
+
+// ConfigMapResourceVersion contains the resource versions of the config maps
+// managed by the operator
+type ConfigMapResourceVersion struct {
+	// A map with the versions of all the config maps used to pass metrics.
+	// Map keys are the config map names, map values are the versions
+	// +optional
+	Metrics map[string]string `json:"metrics,omitempty"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Cluster{}, &ClusterList{})
+}
diff --git a/pkg/api/v1/clusterimagecatalog_types.go b/pkg/api/v1/clusterimagecatalog_types.go
new file mode 100644
index 0000000..850822f
--- /dev/null
+++ b/pkg/api/v1/clusterimagecatalog_types.go
@@ -0,0 +1,50 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:storageversion
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// ClusterImageCatalog is the Schema for the clusterimagecatalogs API
+type ClusterImageCatalog struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+	// Specification of the desired behavior of the ClusterImageCatalog.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ImageCatalogSpec `json:"spec"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterImageCatalogList contains a list of ClusterImageCatalog
+type ClusterImageCatalogList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	metav1.ListMeta `json:"metadata"`
+	// List of ClusterImageCatalogs
+	Items []ClusterImageCatalog `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ClusterImageCatalog{}, &ClusterImageCatalogList{})
+}
diff --git a/pkg/api/v1/common_types.go b/pkg/api/v1/common_types.go
new file mode 100644
index 0000000..fb5144a
--- /dev/null
+++ b/pkg/api/v1/common_types.go
@@ -0,0 +1,43 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// VolumeSnapshotKind is a strongly typed reference to the kind used by the volumesnapshot package
+const VolumeSnapshotKind = "VolumeSnapshot"
+
+// Metadata is a structure similar to metav1.ObjectMeta, but still
+// parseable by controller-gen to create a suitable CRD for the user.
+// The comment of PodTemplateSpec has an explanation of why we are
+// not using the core data types.
+type Metadata struct {
+	// The name of the resource. Only supported for certain types.
+	Name string `json:"name,omitempty"`
+
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects. May match selectors of replication controllers
+	// and services.
+	// More info: http://kubernetes.io/docs/user-guide/labels
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: http://kubernetes.io/docs/user-guide/annotations
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/pkg/api/v1/database_types.go b/pkg/api/v1/database_types.go
new file mode 100644
index 0000000..8cb52ad
--- /dev/null
+++ b/pkg/api/v1/database_types.go
@@ -0,0 +1,132 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DatabaseReclaimPolicy describes a policy for end-of-life maintenance of databases.
+// +enum
+type DatabaseReclaimPolicy string
+
+const (
+	// DatabaseReclaimDelete means the database will be deleted from its PostgreSQL Cluster on release
+	// from its claim.
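+	// (In practice this means the underlying PostgreSQL database is
+	// expected to be dropped once the Database object releases it.)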
+	DatabaseReclaimDelete DatabaseReclaimPolicy = "delete"
+
+	// DatabaseReclaimRetain means the database will be left in its current phase for manual
+	// reclamation by the administrator. The default policy is Retain.
+	DatabaseReclaimRetain DatabaseReclaimPolicy = "retain"
+)
+
+// DatabaseSpec is the specification of a PostgreSQL Database
+type DatabaseSpec struct {
+	// The corresponding cluster
+	ClusterRef corev1.LocalObjectReference `json:"cluster"`
+
+	// The name inside PostgreSQL
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+	Name string `json:"name"`
+
+	// The role name of the user who owns the database inside PostgreSQL
+	Owner string `json:"owner"`
+
+	// The name of the template from which to create the new database
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable"
+	Template string `json:"template,omitempty"`
+
+	// The encoding (cannot be changed)
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable"
+	// +optional
+	Encoding string `json:"encoding,omitempty"`
+
+	// True when the database is a template
+	// +optional
+	IsTemplate *bool `json:"isTemplate,omitempty"`
+
+	// True when connections to this database are allowed
+	// +optional
+	AllowConnections *bool `json:"allowConnections,omitempty"`
+
+	// Connection limit, -1 means no limit and -2 means the
+	// database is not valid
+	// +optional
+	ConnectionLimit *int `json:"connectionLimit,omitempty"`
+
+	// The default tablespace of this database
+	// +optional
+	Tablespace string `json:"tablespace,omitempty"`
+
+	// The policy for end-of-life maintenance of this database
+	// +kubebuilder:validation:Enum=delete;retain
+	// +kubebuilder:default:=retain
+	// +optional
+	ReclaimPolicy DatabaseReclaimPolicy `json:"databaseReclaimPolicy,omitempty"`
+}
+
+// DatabaseStatus defines the observed state of Database
+type DatabaseStatus struct {
+	// A sequence number representing the latest
+	// desired state that was synchronized
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Ready is true if the database was reconciled correctly
+	Ready bool `json:"ready,omitempty"`
+
+	// Error is the reconciliation error message
+	Error string `json:"error,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
+// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready"
+// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error",description="Latest error message"
+
+// Database is the Schema for the databases API
+type Database struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// Specification of the desired Database.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec DatabaseSpec `json:"spec"`
+	// Most recently observed status of the Database. This data may not be up to
+	// date. Populated by the system. Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status DatabaseStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// DatabaseList contains a list of Database
+type DatabaseList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []Database `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Database{}, &DatabaseList{})
+}
diff --git a/pkg/api/v1/groupversion_info.go b/pkg/api/v1/groupversion_info.go
new file mode 100644
index 0000000..44bab3d
--- /dev/null
+++ b/pkg/api/v1/groupversion_info.go
@@ -0,0 +1,65 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the postgresql v1 API group
+// +kubebuilder:object:generate=true
+// +groupName=postgresql.cnpg.io
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is the group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"}
+
+	// ClusterGVK is the group/version/resource triple used to reach Cluster resources in k8s
+	ClusterGVK = schema.GroupVersionResource{
+		Group:    GroupVersion.Group,
+		Version:  GroupVersion.Version,
+		Resource: "clusters",
+	}
+
+	// PoolerGVK is the group/version/resource triple used to reach Pooler resources in k8s
+	PoolerGVK = schema.GroupVersionResource{
+		Group:    GroupVersion.Group,
+		Version:  GroupVersion.Version,
+		Resource: "poolers",
+	}
+
+	// ClusterKind is the kind name of Clusters
+	ClusterKind = "Cluster"
+
+	// BackupKind is the kind name of Backups
+	BackupKind = "Backup"
+
+	// PoolerKind is the kind name of Poolers
+	PoolerKind = "Pooler"
+
+	// ImageCatalogKind is the kind name of namespaced image catalogs
+	ImageCatalogKind = "ImageCatalog"
+
+	// ClusterImageCatalogKind is the kind name of the cluster-wide image catalogs
+	ClusterImageCatalogKind = "ClusterImageCatalog"
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/pkg/api/v1/imagecatalog_types.go b/pkg/api/v1/imagecatalog_types.go
new file mode 100644
index 0000000..2d5d5c1
--- /dev/null
+++ b/pkg/api/v1/imagecatalog_types.go
@@ -0,0 +1,69 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ImageCatalogSpec defines the desired ImageCatalog
type ImageCatalogSpec struct {
+	// List of CatalogImages available in the catalog
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=8
+	// +kubebuilder:validation:XValidation:rule="self.all(e, self.filter(f, f.major==e.major).size() == 1)",message=Images must have unique major versions
+	Images []CatalogImage `json:"images"`
+}
+
+// CatalogImage defines the image and major version
+type CatalogImage struct {
+	// The image reference
+	Image string `json:"image"`
+	// The PostgreSQL major version of the image. Must be unique within the catalog.
+	// +kubebuilder:validation:Minimum=10
+	Major int `json:"major"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+
+// ImageCatalog is the Schema for the imagecatalogs API
+type ImageCatalog struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+	// Specification of the desired behavior of the ImageCatalog.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ImageCatalogSpec `json:"spec"`
+}
+
+// +kubebuilder:object:root=true
+
+// ImageCatalogList contains a list of ImageCatalog
+type ImageCatalogList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	metav1.ListMeta `json:"metadata"`
+	// List of ImageCatalogs
+	Items []ImageCatalog `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ImageCatalog{}, &ImageCatalogList{})
+}
diff --git a/pkg/api/v1/pooler_types.go b/pkg/api/v1/pooler_types.go
new file mode 100644
index 0000000..5fc3bdb
--- /dev/null
+++ b/pkg/api/v1/pooler_types.go
@@ -0,0 +1,269 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PoolerType is the type of the connection pool, meaning the service
+// we are targeting. Allowed values are `rw` and `ro`.
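+// For example (an illustrative note), a Pooler with `type: ro` targets only
+// the cluster replicas, while the default `rw` targets the current primary
+// (see the constants below).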
+// +kubebuilder:validation:Enum=rw;ro
+type PoolerType string
+
+const (
+	// PoolerTypeRW means that the pooler involves only the primary server
+	PoolerTypeRW = PoolerType("rw")
+
+	// PoolerTypeRO means that the pooler involves only the replicas
+	PoolerTypeRO = PoolerType("ro")
+
+	// DefaultPgBouncerPoolerAuthQuery is the default auth_query for PgBouncer
+	DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM public.user_search($1)"
+)
+
+// PgBouncerPoolMode is the mode of PgBouncer
+// +kubebuilder:validation:Enum=session;transaction
+type PgBouncerPoolMode string
+
+const (
+	// PgBouncerPoolModeSession the "session" mode
+	PgBouncerPoolModeSession = PgBouncerPoolMode("session")
+
+	// PgBouncerPoolModeTransaction the "transaction" mode
+	PgBouncerPoolModeTransaction = PgBouncerPoolMode("transaction")
+)
+
+// PoolerSpec defines the desired state of Pooler
+type PoolerSpec struct {
+	// This is the cluster reference on which the Pooler will work.
+	// The Pooler name should never match any cluster name within the same namespace.
+	Cluster LocalObjectReference `json:"cluster"`
+
+	// Type of service to forward traffic to. Default: `rw`.
+	// +kubebuilder:default:=rw
+	// +optional
+	Type PoolerType `json:"type,omitempty"`
+
+	// The number of replicas we want. Default: 1.
+	// +kubebuilder:default:=1
+	// +optional
+	Instances *int32 `json:"instances,omitempty"`
+
+	// The template of the Pod to be created
+	// +optional
+	Template *PodTemplateSpec `json:"template,omitempty"`
+
+	// The PgBouncer configuration
+	PgBouncer *PgBouncerSpec `json:"pgbouncer"`
+
+	// The deployment strategy to use for pgbouncer to replace existing pods with new ones
+	// +optional
+	DeploymentStrategy *appsv1.DeploymentStrategy `json:"deploymentStrategy,omitempty"`
+
+	// The configuration of the monitoring infrastructure of this pooler.
+	// +optional
+	Monitoring *PoolerMonitoringConfiguration `json:"monitoring,omitempty"`
+
+	// Template for the Service to be created
+	// +optional
+	ServiceTemplate *ServiceTemplateSpec `json:"serviceTemplate,omitempty"`
+}
+
+// PoolerMonitoringConfiguration is the type containing all the monitoring
+// configuration for a certain Pooler.
+//
+// Mirrors the Cluster's MonitoringConfiguration but without the custom queries
+// part for now.
+type PoolerMonitoringConfiguration struct {
+	// Enable or disable the `PodMonitor`
+	// +kubebuilder:default:=false
+	// +optional
+	EnablePodMonitor bool `json:"enablePodMonitor,omitempty"`
+
+	// The list of metric relabelings for the `PodMonitor`. Applied to samples before ingestion.
+	// +optional
+	PodMonitorMetricRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorMetricRelabelings,omitempty"`
+
+	// The list of relabelings for the `PodMonitor`. Applied to samples before scraping.
+	// +optional
+	PodMonitorRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorRelabelings,omitempty"`
+}
+
+// PodTemplateSpec is a structure allowing the user to set
+// a template for Pod generation.
+//
+// Unfortunately we can't use the corev1.PodTemplateSpec
+// type because the generated CRD won't have the field for the
+// metadata section.
+//
+// References:
+// https://github.com/kubernetes-sigs/controller-tools/issues/385
+// https://github.com/kubernetes-sigs/controller-tools/issues/448
+// https://github.com/prometheus-operator/prometheus-operator/issues/3041
+type PodTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta Metadata `json:"metadata,omitempty"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec corev1.PodSpec `json:"spec,omitempty"`
+}
+
+// ServiceTemplateSpec is a structure allowing the user to set
+// a template for Service generation.
+type ServiceTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta Metadata `json:"metadata,omitempty"`
+
+	// Specification of the desired behavior of the service.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec corev1.ServiceSpec `json:"spec,omitempty"`
+}
+
+// PgBouncerSpec defines how to configure PgBouncer
+type PgBouncerSpec struct {
+	// The pool mode. Default: `session`.
+	// +kubebuilder:default:=session
+	// +optional
+	PoolMode PgBouncerPoolMode `json:"poolMode,omitempty"`
+
+	// The credentials of the user that needs to be used for the authentication
+	// query. If specified, an AuthQuery
+	// (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+	// must also be specified, and no automatic CNPG Cluster integration will be triggered.
+	// +optional
+	AuthQuerySecret *LocalObjectReference `json:"authQuerySecret,omitempty"`
+
+	// The query that will be used to download the hash of the password
+	// of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+	// If specified, an AuthQuerySecret must also be specified, and no
+	// automatic CNPG Cluster integration will be triggered.
+	// +optional
+	AuthQuery string `json:"authQuery,omitempty"`
+
+	// Additional parameters to be passed to PgBouncer - please check
+	// the CNPG documentation for a list of options you can configure
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty"`
+
+	// PostgreSQL Host Based Authentication rules (lines to be appended
+	// to the pg_hba.conf file)
+	// +optional
+	PgHBA []string `json:"pg_hba,omitempty"`
+
+	// When set to `true`, PgBouncer will disconnect from the PostgreSQL
+	// server, first waiting for all queries to complete, and pause all new
+	// client connections until this value is set to `false` (default). Internally,
+	// the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
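+	// An illustrative sketch (object names assumed): pausing an existing
+	// Pooler from Go code amounts to setting this field and updating the
+	// object, e.g.
+	//
+	//	paused := true
+	//	pooler.Spec.PgBouncer.Paused = &paused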
+	// +kubebuilder:default:=false
+	// +optional
+	Paused *bool `json:"paused,omitempty"`
+}
+
+// PoolerStatus defines the observed state of Pooler
+type PoolerStatus struct {
+	// The resource versions of the secrets used by the Pooler
+	// +optional
+	Secrets *PoolerSecrets `json:"secrets,omitempty"`
+	// The number of pods trying to be scheduled
+	// +optional
+	Instances int32 `json:"instances,omitempty"`
+}
+
+// PoolerSecrets contains the versions of all the secrets used
+type PoolerSecrets struct {
+	// The server TLS secret version
+	// +optional
+	ServerTLS SecretVersion `json:"serverTLS,omitempty"`
+
+	// The server CA secret version
+	// +optional
+	ServerCA SecretVersion `json:"serverCA,omitempty"`
+
+	// The client CA secret version
+	// +optional
+	ClientCA SecretVersion `json:"clientCA,omitempty"`
+
+	// The version of the secrets used by PgBouncer
+	// +optional
+	PgBouncerSecrets *PgBouncerSecrets `json:"pgBouncerSecrets,omitempty"`
+}
+
+// PgBouncerSecrets contains the versions of the secrets used
+// by pgbouncer
+type PgBouncerSecrets struct {
+	// The auth query secret version
+	// +optional
+	AuthQuery SecretVersion `json:"authQuery,omitempty"`
+}
+
+// SecretVersion contains a secret name and its ResourceVersion
+type SecretVersion struct {
+	// The name of the secret
+	// +optional
+	Name string `json:"name,omitempty"`
+
+	// The ResourceVersion of the secret
+	// +optional
+	Version string `json:"version,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
+// +kubebuilder:subresource:scale:specpath=.spec.instances,statuspath=.status.instances
+
+// Pooler is the Schema for the poolers API
+type Pooler struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// Specification of the desired behavior of the Pooler.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec PoolerSpec `json:"spec"`
+	// Most recently observed status of the Pooler. This data may not be up to
+	// date. Populated by the system. Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status PoolerStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PoolerList contains a list of Pooler
+type PoolerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []Pooler `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Pooler{}, &PoolerList{})
+}
diff --git a/pkg/api/v1/scheduledbackup_types.go b/pkg/api/v1/scheduledbackup_types.go
new file mode 100644
index 0000000..1929db5
--- /dev/null
+++ b/pkg/api/v1/scheduledbackup_types.go
@@ -0,0 +1,135 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ScheduledBackupSpec defines the desired state of ScheduledBackup
+type ScheduledBackupSpec struct {
+	// If this backup is suspended or not
+	// +optional
+	Suspend *bool `json:"suspend,omitempty"`
+
+	// If the first backup has to start immediately after creation or not
+	// +optional
+	Immediate *bool `json:"immediate,omitempty"`
+
+	// The schedule does not follow the same format used in Kubernetes CronJobs
+	// as it includes an additional seconds specifier,
+	// see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+	Schedule string `json:"schedule"`
+
+	// The cluster to backup
+	Cluster LocalObjectReference `json:"cluster"`
+
+	// Indicates which ownerReference should be put inside the created backup resources.
+	// - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+	// - self: sets the ScheduledBackup object as owner of the backup
+	// - cluster: sets the cluster as owner of the backup
+	// +kubebuilder:validation:Enum=none;self;cluster
+	// +kubebuilder:default:=none
+	// +optional
+	BackupOwnerReference string `json:"backupOwnerReference,omitempty"`
+
+	// The policy to decide which instance should perform this backup. If empty,
+	// it defaults to `cluster.spec.backup.target`.
+	// Available options are empty string, `primary` and `prefer-standby`.
+	// `primary` to have backups run always on primary instances,
+	// `prefer-standby` to have backups run preferably on the most updated
+	// standby, if available.
+	// +kubebuilder:validation:Enum=primary;prefer-standby
+	// +optional
+	Target BackupTarget `json:"target,omitempty"`
+
+	// The backup method to be used, possible options are `barmanObjectStore`,
+	// `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+	// +optional
+	// +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot;plugin
+	// +kubebuilder:default:=barmanObjectStore
+	Method BackupMethod `json:"method,omitempty"`
+
+	// Configuration parameters passed to the plugin managing this backup
+	// +optional
+	PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
+
+	// Whether the default type of backup with volume snapshots is
+	// online/hot (`true`, default) or offline/cold (`false`).
+	// Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+	// +optional
+	Online *bool `json:"online,omitempty"`
+
+	// Configuration parameters to control the online/hot backup with volume snapshots.
+	// Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+	// +optional
+	OnlineConfiguration *OnlineConfiguration `json:"onlineConfiguration,omitempty"`
+}
+
+// ScheduledBackupStatus defines the observed state of ScheduledBackup
+type ScheduledBackupStatus struct {
+	// The last time the schedule was checked
+	// +optional
+	LastCheckTime *metav1.Time `json:"lastCheckTime,omitempty"`
+
+	// Information about the last time a backup was successfully scheduled.
+	// +optional
+	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
+
+	// Next time we will run a backup
+	// +optional
+	NextScheduleTime *metav1.Time `json:"nextScheduleTime,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="Last Backup",type="date",JSONPath=".status.lastScheduleTime"
+
+// ScheduledBackup is the Schema for the scheduledbackups API
+type ScheduledBackup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// Specification of the desired behavior of the ScheduledBackup.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	Spec ScheduledBackupSpec `json:"spec"`
+	// Most recently observed status of the ScheduledBackup. This data may not be up
+	// to date. Populated by the system. Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ScheduledBackupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ScheduledBackupList contains a list of ScheduledBackup
+type ScheduledBackupList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	metav1.ListMeta `json:"metadata,omitempty"`
+	// List of scheduled backups
+	Items []ScheduledBackup `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ScheduledBackup{}, &ScheduledBackupList{})
+}
diff --git a/pkg/api/v1/zz_api_repo_funcs_to_copy.go b/pkg/api/v1/zz_api_repo_funcs_to_copy.go
new file mode 100644
index 0000000..f7cbea2
--- /dev/null
+++ b/pkg/api/v1/zz_api_repo_funcs_to_copy.go
@@ -0,0 +1,27 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// IMPORTANT:
+// This file contains the functions that need to be copied from the api/v1 package to the cloudnative-pg/api
+// repository. This is currently required because the controller-gen tool cannot generate DeepCopyInto for the
+// regexp type. This will be removed once the controller-gen tool supports this feature.
+
+// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot
+// generate the DeepCopyInto for the regexp type.
+// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto.
+func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {}
diff --git a/pkg/api/v1/zz_generated.deepcopy.go b/pkg/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..14c71da
--- /dev/null
+++ b/pkg/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,2773 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	pkgapi "github.com/cloudnative-pg/barman-cloud/pkg/api"
+	"github.com/cloudnative-pg/machinery/pkg/api"
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AffinityConfiguration) DeepCopyInto(out *AffinityConfiguration) { + *out = *in + if in.EnablePodAntiAffinity != nil { + in, out := &in.EnablePodAntiAffinity, &out.EnablePodAntiAffinity + *out = new(bool) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalPodAntiAffinity != nil { + in, out := &in.AdditionalPodAntiAffinity, &out.AdditionalPodAntiAffinity + *out = new(corev1.PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPodAffinity != nil { + in, out := &in.AdditionalPodAffinity, &out.AdditionalPodAffinity + *out = new(corev1.PodAffinity) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AffinityConfiguration. +func (in *AffinityConfiguration) DeepCopy() *AffinityConfiguration { + if in == nil { + return nil + } + out := new(AffinityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailableArchitecture) DeepCopyInto(out *AvailableArchitecture) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailableArchitecture. +func (in *AvailableArchitecture) DeepCopy() *AvailableArchitecture { + if in == nil { + return nil + } + out := new(AvailableArchitecture) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupConfiguration) DeepCopyInto(out *BackupConfiguration) { + *out = *in + if in.VolumeSnapshot != nil { + in, out := &in.VolumeSnapshot, &out.VolumeSnapshot + *out = new(VolumeSnapshotConfiguration) + (*in).DeepCopyInto(*out) + } + if in.BarmanObjectStore != nil { + in, out := &in.BarmanObjectStore, &out.BarmanObjectStore + *out = new(pkgapi.BarmanObjectStoreConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfiguration. 
+func (in *BackupConfiguration) DeepCopy() *BackupConfiguration { + if in == nil { + return nil + } + out := new(BackupConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupPluginConfiguration) DeepCopyInto(out *BackupPluginConfiguration) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPluginConfiguration. +func (in *BackupPluginConfiguration) DeepCopy() *BackupPluginConfiguration { + if in == nil { + return nil + } + out := new(BackupPluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSnapshotElementStatus) DeepCopyInto(out *BackupSnapshotElementStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSnapshotElementStatus. +func (in *BackupSnapshotElementStatus) DeepCopy() *BackupSnapshotElementStatus { + if in == nil { + return nil + } + out := new(BackupSnapshotElementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSnapshotStatus) DeepCopyInto(out *BackupSnapshotStatus) { + *out = *in + if in.Elements != nil { + in, out := &in.Elements, &out.Elements + *out = make([]BackupSnapshotElementStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSnapshotStatus. +func (in *BackupSnapshotStatus) DeepCopy() *BackupSnapshotStatus { + if in == nil { + return nil + } + out := new(BackupSnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSource) DeepCopyInto(out *BackupSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.EndpointCA != nil { + in, out := &in.EndpointCA, &out.EndpointCA + *out = new(api.SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSource. 
+func (in *BackupSource) DeepCopy() *BackupSource { + if in == nil { + return nil + } + out := new(BackupSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + out.Cluster = in.Cluster + if in.PluginConfiguration != nil { + in, out := &in.PluginConfiguration, &out.PluginConfiguration + *out = new(BackupPluginConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Online != nil { + in, out := &in.Online, &out.Online + *out = new(bool) + **out = **in + } + if in.OnlineConfiguration != nil { + in, out := &in.OnlineConfiguration, &out.OnlineConfiguration + *out = new(OnlineConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + in.BarmanCredentials.DeepCopyInto(&out.BarmanCredentials) + if in.EndpointCA != nil { + in, out := &in.EndpointCA, &out.EndpointCA + *out = new(api.SecretKeySelector) + **out = **in + } + if in.StartedAt != nil { + in, out := &in.StartedAt, &out.StartedAt + *out = (*in).DeepCopy() + } + if in.StoppedAt != nil { + in, out := &in.StoppedAt, &out.StoppedAt + *out = (*in).DeepCopy() + } + if in.BackupLabelFile != nil { + in, out := &in.BackupLabelFile, &out.BackupLabelFile + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.TablespaceMapFile != nil { + in, out := &in.TablespaceMapFile, &out.TablespaceMapFile + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(InstanceID) + **out = **in + } + in.BackupSnapshotStatus.DeepCopyInto(&out.BackupSnapshotStatus) + if in.Online != nil { + in, out := &in.Online, &out.Online + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapConfiguration) DeepCopyInto(out *BootstrapConfiguration) { + *out = *in + if in.InitDB != nil { + in, out := &in.InitDB, &out.InitDB + *out = new(BootstrapInitDB) + (*in).DeepCopyInto(*out) + } + if in.Recovery != nil { + in, out := &in.Recovery, &out.Recovery + *out = new(BootstrapRecovery) + (*in).DeepCopyInto(*out) + } + if in.PgBaseBackup != nil { + in, out := &in.PgBaseBackup, &out.PgBaseBackup + *out = new(BootstrapPgBaseBackup) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapConfiguration. +func (in *BootstrapConfiguration) DeepCopy() *BootstrapConfiguration { + if in == nil { + return nil + } + out := new(BootstrapConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BootstrapInitDB) DeepCopyInto(out *BootstrapInitDB) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(api.LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DataChecksums != nil { + in, out := &in.DataChecksums, &out.DataChecksums + *out = new(bool) + **out = **in + } + if in.PostInitSQL != nil { + in, out := &in.PostInitSQL, &out.PostInitSQL + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostInitApplicationSQL != nil { + in, out := &in.PostInitApplicationSQL, &out.PostInitApplicationSQL + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostInitTemplateSQL != nil { + in, out := &in.PostInitTemplateSQL, &out.PostInitTemplateSQL + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(Import) + (*in).DeepCopyInto(*out) + } + if in.PostInitApplicationSQLRefs != nil { + in, out := &in.PostInitApplicationSQLRefs, &out.PostInitApplicationSQLRefs + *out = new(SQLRefs) + (*in).DeepCopyInto(*out) + } + if in.PostInitTemplateSQLRefs != nil { + in, out := &in.PostInitTemplateSQLRefs, &out.PostInitTemplateSQLRefs + *out = new(SQLRefs) + (*in).DeepCopyInto(*out) + } + if in.PostInitSQLRefs != nil { + in, out := &in.PostInitSQLRefs, &out.PostInitSQLRefs + *out = new(SQLRefs) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapInitDB. +func (in *BootstrapInitDB) DeepCopy() *BootstrapInitDB { + if in == nil { + return nil + } + out := new(BootstrapInitDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapPgBaseBackup) DeepCopyInto(out *BootstrapPgBaseBackup) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(api.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapPgBaseBackup. +func (in *BootstrapPgBaseBackup) DeepCopy() *BootstrapPgBaseBackup { + if in == nil { + return nil + } + out := new(BootstrapPgBaseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapRecovery) DeepCopyInto(out *BootstrapRecovery) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(BackupSource) + (*in).DeepCopyInto(*out) + } + if in.VolumeSnapshots != nil { + in, out := &in.VolumeSnapshots, &out.VolumeSnapshots + *out = new(DataSource) + (*in).DeepCopyInto(*out) + } + if in.RecoveryTarget != nil { + in, out := &in.RecoveryTarget, &out.RecoveryTarget + *out = new(RecoveryTarget) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(api.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapRecovery. +func (in *BootstrapRecovery) DeepCopy() *BootstrapRecovery { + if in == nil { + return nil + } + out := new(BootstrapRecovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CatalogImage) DeepCopyInto(out *CatalogImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogImage. +func (in *CatalogImage) DeepCopy() *CatalogImage { + if in == nil { + return nil + } + out := new(CatalogImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatesConfiguration) DeepCopyInto(out *CertificatesConfiguration) { + *out = *in + if in.ServerAltDNSNames != nil { + in, out := &in.ServerAltDNSNames, &out.ServerAltDNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesConfiguration. +func (in *CertificatesConfiguration) DeepCopy() *CertificatesConfiguration { + if in == nil { + return nil + } + out := new(CertificatesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificatesStatus) DeepCopyInto(out *CertificatesStatus) { + *out = *in + in.CertificatesConfiguration.DeepCopyInto(&out.CertificatesConfiguration) + if in.Expirations != nil { + in, out := &in.Expirations, &out.Expirations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificatesStatus. +func (in *CertificatesStatus) DeepCopy() *CertificatesStatus { + if in == nil { + return nil + } + out := new(CertificatesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImageCatalog) DeepCopyInto(out *ClusterImageCatalog) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageCatalog. +func (in *ClusterImageCatalog) DeepCopy() *ClusterImageCatalog { + if in == nil { + return nil + } + out := new(ClusterImageCatalog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImageCatalog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterImageCatalogList) DeepCopyInto(out *ClusterImageCatalogList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterImageCatalog, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImageCatalogList.
+func (in *ClusterImageCatalogList) DeepCopy() *ClusterImageCatalogList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterImageCatalogList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterImageCatalogList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterList) DeepCopyInto(out *ClusterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Cluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
+func (in *ClusterList) DeepCopy() *ClusterList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterMonitoringTLSConfiguration) DeepCopyInto(out *ClusterMonitoringTLSConfiguration) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringTLSConfiguration.
+func (in *ClusterMonitoringTLSConfiguration) DeepCopy() *ClusterMonitoringTLSConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterMonitoringTLSConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
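The DeepCopyObject methods are what make Cluster, ClusterImageCatalog and their List types satisfy apimachinery's runtime.Object interface, so schemes, informers and clients can clone them without knowing the concrete type. A minimal sketch of that generic use; the CloneAll helper is ours for illustration, not part of the patch:

package clone

import "k8s.io/apimachinery/pkg/runtime"

// CloneAll deep-copies arbitrary API objects through the interface; any of
// the types above qualify because of their generated DeepCopyObject methods.
func CloneAll(objs []runtime.Object) []runtime.Object {
	out := make([]runtime.Object, 0, len(objs))
	for _, obj := range objs {
		out = append(out, obj.DeepCopyObject())
	}
	return out
}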
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
+	*out = *in
+	if in.InheritedMetadata != nil {
+		in, out := &in.InheritedMetadata, &out.InheritedMetadata
+		*out = new(EmbeddedObjectMetadata)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImageCatalogRef != nil {
+		in, out := &in.ImageCatalogRef, &out.ImageCatalogRef
+		*out = new(ImageCatalogRef)
+		(*in).DeepCopyInto(*out)
+	}
+	in.PostgresConfiguration.DeepCopyInto(&out.PostgresConfiguration)
+	if in.ReplicationSlots != nil {
+		in, out := &in.ReplicationSlots, &out.ReplicationSlots
+		*out = new(ReplicationSlotsConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Bootstrap != nil {
+		in, out := &in.Bootstrap, &out.Bootstrap
+		*out = new(BootstrapConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReplicaCluster != nil {
+		in, out := &in.ReplicaCluster, &out.ReplicaCluster
+		*out = new(ReplicaClusterConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SuperuserSecret != nil {
+		in, out := &in.SuperuserSecret, &out.SuperuserSecret
+		*out = new(api.LocalObjectReference)
+		**out = **in
+	}
+	if in.EnableSuperuserAccess != nil {
+		in, out := &in.EnableSuperuserAccess, &out.EnableSuperuserAccess
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Certificates != nil {
+		in, out := &in.Certificates, &out.Certificates
+		*out = new(CertificatesConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]api.LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	in.StorageConfiguration.DeepCopyInto(&out.StorageConfiguration)
+	if in.ServiceAccountTemplate != nil {
+		in, out := &in.ServiceAccountTemplate, &out.ServiceAccountTemplate
+		*out = new(ServiceAccountTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.WalStorage != nil {
+		in, out := &in.WalStorage, &out.WalStorage
+		*out = new(StorageConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EphemeralVolumeSource != nil {
+		in, out := &in.EphemeralVolumeSource, &out.EphemeralVolumeSource
+		*out = new(corev1.EphemeralVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SmartShutdownTimeout != nil {
+		in, out := &in.SmartShutdownTimeout, &out.SmartShutdownTimeout
+		*out = new(int32)
+		**out = **in
+	}
+	if in.LivenessProbeTimeout != nil {
+		in, out := &in.LivenessProbeTimeout, &out.LivenessProbeTimeout
+		*out = new(int32)
+		**out = **in
+	}
+	in.Affinity.DeepCopyInto(&out.Affinity)
+	if in.TopologySpreadConstraints != nil {
+		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+		*out = make([]corev1.TopologySpreadConstraint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.EphemeralVolumesSizeLimit != nil {
+		in, out := &in.EphemeralVolumesSizeLimit, &out.EphemeralVolumesSizeLimit
+		*out = new(EphemeralVolumesSizeLimitConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Backup != nil {
+		in, out := &in.Backup, &out.Backup
+		*out = new(BackupConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodeMaintenanceWindow != nil {
+		in, out := &in.NodeMaintenanceWindow, &out.NodeMaintenanceWindow
+		*out = new(NodeMaintenanceWindow)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Monitoring != nil {
+		in, out := &in.Monitoring, &out.Monitoring
+		*out = new(MonitoringConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ExternalClusters != nil {
+		in, out := &in.ExternalClusters, &out.ExternalClusters
+		*out = make([]ExternalCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ProjectedVolumeTemplate != nil {
+		in, out := &in.ProjectedVolumeTemplate, &out.ProjectedVolumeTemplate
+		*out = new(corev1.ProjectedVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]corev1.EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]corev1.EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Managed != nil {
+		in, out := &in.Managed, &out.Managed
+		*out = new(ManagedConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SeccompProfile != nil {
+		in, out := &in.SeccompProfile, &out.SeccompProfile
+		*out = new(corev1.SeccompProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tablespaces != nil {
+		in, out := &in.Tablespaces, &out.Tablespaces
+		*out = make([]TablespaceConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EnablePDB != nil {
+		in, out := &in.EnablePDB, &out.EnablePDB
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Plugins != nil {
+		in, out := &in.Plugins, &out.Plugins
+		*out = make(PluginConfigurationList, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
+func (in *ClusterSpec) DeepCopy() *ClusterSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
+	*out = *in
+	if in.InstancesStatus != nil {
+		in, out := &in.InstancesStatus, &out.InstancesStatus
+		*out = make(map[PodStatus][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.InstancesReportedState != nil {
+		in, out := &in.InstancesReportedState, &out.InstancesReportedState
+		*out = make(map[PodName]InstanceReportedState, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.ManagedRolesStatus.DeepCopyInto(&out.ManagedRolesStatus)
+	if in.TablespacesStatus != nil {
+		in, out := &in.TablespacesStatus, &out.TablespacesStatus
+		*out = make([]TablespaceState, len(*in))
+		copy(*out, *in)
+	}
+	in.Topology.DeepCopyInto(&out.Topology)
+	if in.DanglingPVC != nil {
+		in, out := &in.DanglingPVC, &out.DanglingPVC
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResizingPVC != nil {
+		in, out := &in.ResizingPVC, &out.ResizingPVC
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.InitializingPVC != nil {
+		in, out := &in.InitializingPVC, &out.InitializingPVC
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.HealthyPVC != nil {
+		in, out := &in.HealthyPVC, &out.HealthyPVC
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.UnusablePVC != nil {
+		in, out := &in.UnusablePVC, &out.UnusablePVC
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.SecretsResourceVersion.DeepCopyInto(&out.SecretsResourceVersion)
+	in.ConfigMapResourceVersion.DeepCopyInto(&out.ConfigMapResourceVersion)
+	in.Certificates.DeepCopyInto(&out.Certificates)
+	if in.FirstRecoverabilityPointByMethod != nil {
+		in, out := &in.FirstRecoverabilityPointByMethod, &out.FirstRecoverabilityPointByMethod
+		*out = make(map[BackupMethod]metav1.Time, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.LastSuccessfulBackupByMethod != nil {
+		in, out := &in.LastSuccessfulBackupByMethod, &out.LastSuccessfulBackupByMethod
+		*out = make(map[BackupMethod]metav1.Time, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.PoolerIntegrations != nil {
+		in, out := &in.PoolerIntegrations, &out.PoolerIntegrations
+		*out = new(PoolerIntegrations)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AvailableArchitectures != nil {
+		in, out := &in.AvailableArchitectures, &out.AvailableArchitectures
+		*out = make([]AvailableArchitecture, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InstanceNames != nil {
+		in, out := &in.InstanceNames, &out.InstanceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.PluginStatus != nil {
+		in, out := &in.PluginStatus, &out.PluginStatus
+		*out = make([]PluginStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	out.SwitchReplicaClusterStatus = in.SwitchReplicaClusterStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
+func (in *ClusterStatus) DeepCopy() *ClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapResourceVersion) DeepCopyInto(out *ConfigMapResourceVersion) {
+	*out = *in
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceVersion.
+func (in *ConfigMapResourceVersion) DeepCopy() *ConfigMapResourceVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapResourceVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataSource) DeepCopyInto(out *DataSource) {
+	*out = *in
+	in.Storage.DeepCopyInto(&out.Storage)
+	if in.WalStorage != nil {
+		in, out := &in.WalStorage, &out.WalStorage
+		*out = new(corev1.TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TablespaceStorage != nil {
+		in, out := &in.TablespaceStorage, &out.TablespaceStorage
+		*out = make(map[string]corev1.TypedLocalObjectReference, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
+func (in *DataSource) DeepCopy() *DataSource {
+	if in == nil {
+		return nil
+	}
+	out := new(DataSource)
+	in.DeepCopyInto(out)
+	return out
+}
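Maps get the same element-wise treatment as slices: a plain assignment would share the backing storage between in and out, so every entry is copied, here through the value type's own DeepCopy helper. A minimal sketch mirroring the FirstRecoverabilityPointByMethod loop above; copyTimeMap is an illustrative helper, not part of the patch:

package clone

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// copyTimeMap rebuilds the map entry by entry so neither the map header nor
// its values are shared with the source.
func copyTimeMap(in map[string]metav1.Time) map[string]metav1.Time {
	if in == nil {
		return nil
	}
	out := make(map[string]metav1.Time, len(in))
	for key, val := range in {
		out[key] = *val.DeepCopy() // value copy via the type's own helper
	}
	return out
}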
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Database) DeepCopyInto(out *Database) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
+func (in *Database) DeepCopy() *Database {
+	if in == nil {
+		return nil
+	}
+	out := new(Database)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Database) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseList) DeepCopyInto(out *DatabaseList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Database, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList.
+func (in *DatabaseList) DeepCopy() *DatabaseList {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DatabaseList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseRoleRef) DeepCopyInto(out *DatabaseRoleRef) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseRoleRef.
+func (in *DatabaseRoleRef) DeepCopy() *DatabaseRoleRef {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseRoleRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) {
+	*out = *in
+	out.ClusterRef = in.ClusterRef
+	if in.IsTemplate != nil {
+		in, out := &in.IsTemplate, &out.IsTemplate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowConnections != nil {
+		in, out := &in.AllowConnections, &out.AllowConnections
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ConnectionLimit != nil {
+		in, out := &in.ConnectionLimit, &out.ConnectionLimit
+		*out = new(int)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec.
+func (in *DatabaseSpec) DeepCopy() *DatabaseSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus.
+func (in *DatabaseStatus) DeepCopy() *DatabaseStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DatabaseStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedObjectMetadata.
+func (in *EmbeddedObjectMetadata) DeepCopy() *EmbeddedObjectMetadata {
+	if in == nil {
+		return nil
+	}
+	out := new(EmbeddedObjectMetadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EphemeralVolumesSizeLimitConfiguration) DeepCopyInto(out *EphemeralVolumesSizeLimitConfiguration) {
+	*out = *in
+	if in.Shm != nil {
+		in, out := &in.Shm, &out.Shm
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	if in.TemporaryData != nil {
+		in, out := &in.TemporaryData, &out.TemporaryData
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumesSizeLimitConfiguration.
+func (in *EphemeralVolumesSizeLimitConfiguration) DeepCopy() *EphemeralVolumesSizeLimitConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EphemeralVolumesSizeLimitConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) {
+	*out = *in
+	if in.ConnectionParameters != nil {
+		in, out := &in.ConnectionParameters, &out.ConnectionParameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.SSLCert != nil {
+		in, out := &in.SSLCert, &out.SSLCert
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SSLKey != nil {
+		in, out := &in.SSLKey, &out.SSLKey
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SSLRootCert != nil {
+		in, out := &in.SSLRootCert, &out.SSLRootCert
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.BarmanObjectStore != nil {
+		in, out := &in.BarmanObjectStore, &out.BarmanObjectStore
+		*out = new(pkgapi.BarmanObjectStoreConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalCluster.
+func (in *ExternalCluster) DeepCopy() *ExternalCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
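The Shm and TemporaryData fields above show a third copying shape: resource.Quantity's DeepCopy returns a value rather than a pointer, so the generator copies into a temporary and takes its address. A small sketch of the same pattern, with copyQuantityPtr as our illustrative name:

package clone

import "k8s.io/apimachinery/pkg/api/resource"

// copyQuantityPtr clones an optional Quantity without aliasing the source.
func copyQuantityPtr(in *resource.Quantity) *resource.Quantity {
	if in == nil {
		return nil
	}
	x := in.DeepCopy() // DeepCopy returns resource.Quantity by value
	return &x
}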
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCatalog.
+func (in *ImageCatalog) DeepCopy() *ImageCatalog {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageCatalog)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageCatalog) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageCatalogList) DeepCopyInto(out *ImageCatalogList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ImageCatalog, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCatalogList.
+func (in *ImageCatalogList) DeepCopy() *ImageCatalogList {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageCatalogList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageCatalogList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageCatalogRef) DeepCopyInto(out *ImageCatalogRef) {
+	*out = *in
+	in.TypedLocalObjectReference.DeepCopyInto(&out.TypedLocalObjectReference)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCatalogRef.
+func (in *ImageCatalogRef) DeepCopy() *ImageCatalogRef {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageCatalogRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageCatalogSpec) DeepCopyInto(out *ImageCatalogSpec) {
+	*out = *in
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]CatalogImage, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageCatalogSpec.
+func (in *ImageCatalogSpec) DeepCopy() *ImageCatalogSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageCatalogSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Import) DeepCopyInto(out *Import) {
+	*out = *in
+	out.Source = in.Source
+	if in.Databases != nil {
+		in, out := &in.Databases, &out.Databases
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.PostImportApplicationSQL != nil {
+		in, out := &in.PostImportApplicationSQL, &out.PostImportApplicationSQL
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import.
+func (in *Import) DeepCopy() *Import {
+	if in == nil {
+		return nil
+	}
+	out := new(Import)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImportSource) DeepCopyInto(out *ImportSource) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportSource.
+func (in *ImportSource) DeepCopy() *ImportSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ImportSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceID) DeepCopyInto(out *InstanceID) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceID.
+func (in *InstanceID) DeepCopy() *InstanceID {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceID)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InstanceReportedState) DeepCopyInto(out *InstanceReportedState) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceReportedState.
+func (in *InstanceReportedState) DeepCopy() *InstanceReportedState {
+	if in == nil {
+		return nil
+	}
+	out := new(InstanceReportedState)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPBindAsAuth) DeepCopyInto(out *LDAPBindAsAuth) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPBindAsAuth.
+func (in *LDAPBindAsAuth) DeepCopy() *LDAPBindAsAuth {
+	if in == nil {
+		return nil
+	}
+	out := new(LDAPBindAsAuth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPBindSearchAuth) DeepCopyInto(out *LDAPBindSearchAuth) {
+	*out = *in
+	if in.BindPassword != nil {
+		in, out := &in.BindPassword, &out.BindPassword
+		*out = new(corev1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPBindSearchAuth.
+func (in *LDAPBindSearchAuth) DeepCopy() *LDAPBindSearchAuth {
+	if in == nil {
+		return nil
+	}
+	out := new(LDAPBindSearchAuth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPConfig) DeepCopyInto(out *LDAPConfig) {
+	*out = *in
+	if in.BindAsAuth != nil {
+		in, out := &in.BindAsAuth, &out.BindAsAuth
+		*out = new(LDAPBindAsAuth)
+		**out = **in
+	}
+	if in.BindSearchAuth != nil {
+		in, out := &in.BindSearchAuth, &out.BindSearchAuth
+		*out = new(LDAPBindSearchAuth)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPConfig.
+func (in *LDAPConfig) DeepCopy() *LDAPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(LDAPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
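LDAPConfig above makes the generator's decision rule visible: BindAsAuth is cloned with a plain dereference copy (**out = **in) because its struct holds only value fields, while BindSearchAuth goes through DeepCopyInto because it contains a pointer. A hypothetical sketch of the same distinction; flat and nested are stand-in types, not the real API structs:

package clone

// flat holds only value fields, so a dereference copy is already deep.
type flat struct {
	BindName string
	Suffix   string
}

// nested holds a pointer, so each pointed-to value needs its own copy.
type nested struct {
	BindPassword *string
}

func cloneNested(in *nested) *nested {
	if in == nil {
		return nil
	}
	out := new(nested)
	if in.BindPassword != nil {
		out.BindPassword = new(string)
		*out.BindPassword = *in.BindPassword
	}
	return out
}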
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedConfiguration) DeepCopyInto(out *ManagedConfiguration) {
+	*out = *in
+	if in.Roles != nil {
+		in, out := &in.Roles, &out.Roles
+		*out = make([]RoleConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = new(ManagedServices)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedConfiguration.
+func (in *ManagedConfiguration) DeepCopy() *ManagedConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagedConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedRoles) DeepCopyInto(out *ManagedRoles) {
+	*out = *in
+	if in.ByStatus != nil {
+		in, out := &in.ByStatus, &out.ByStatus
+		*out = make(map[RoleStatus][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.CannotReconcile != nil {
+		in, out := &in.CannotReconcile, &out.CannotReconcile
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.PasswordStatus != nil {
+		in, out := &in.PasswordStatus, &out.PasswordStatus
+		*out = make(map[string]PasswordState, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRoles.
+func (in *ManagedRoles) DeepCopy() *ManagedRoles {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagedRoles)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedService) DeepCopyInto(out *ManagedService) {
+	*out = *in
+	in.ServiceTemplate.DeepCopyInto(&out.ServiceTemplate)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedService.
+func (in *ManagedService) DeepCopy() *ManagedService {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagedService)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedServices) DeepCopyInto(out *ManagedServices) {
+	*out = *in
+	if in.DisabledDefaultServices != nil {
+		in, out := &in.DisabledDefaultServices, &out.DisabledDefaultServices
+		*out = make([]ServiceSelectorType, len(*in))
+		copy(*out, *in)
+	}
+	if in.Additional != nil {
+		in, out := &in.Additional, &out.Additional
+		*out = make([]ManagedService, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedServices.
+func (in *ManagedServices) DeepCopy() *ManagedServices {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagedServices)
+	in.DeepCopyInto(out)
+	return out
+}
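ByStatus and CannotReconcile above need two levels of copying, one for the map and one for each slice value, which is why the generated loop carries the inVal/outVal dance. The same logic written once as a generic helper, purely for illustration; the generator deliberately emits it inline instead:

package clone

// copyMapOfSlices deep-copies a map whose values are slices, preserving
// nil-ness of both the map and individual entries, as the generated code does.
func copyMapOfSlices[K comparable, V any](in map[K][]V) map[K][]V {
	if in == nil {
		return nil
	}
	out := make(map[K][]V, len(in))
	for key, val := range in {
		if val == nil {
			out[key] = nil
			continue
		}
		s := make([]V, len(val))
		copy(s, val)
		out[key] = s
	}
	return out
}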
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Metadata) DeepCopyInto(out *Metadata) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
+func (in *Metadata) DeepCopy() *Metadata {
+	if in == nil {
+		return nil
+	}
+	out := new(Metadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MonitoringConfiguration) DeepCopyInto(out *MonitoringConfiguration) {
+	*out = *in
+	if in.DisableDefaultQueries != nil {
+		in, out := &in.DisableDefaultQueries, &out.DisableDefaultQueries
+		*out = new(bool)
+		**out = **in
+	}
+	if in.CustomQueriesConfigMap != nil {
+		in, out := &in.CustomQueriesConfigMap, &out.CustomQueriesConfigMap
+		*out = make([]api.ConfigMapKeySelector, len(*in))
+		copy(*out, *in)
+	}
+	if in.CustomQueriesSecret != nil {
+		in, out := &in.CustomQueriesSecret, &out.CustomQueriesSecret
+		*out = make([]api.SecretKeySelector, len(*in))
+		copy(*out, *in)
+	}
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(ClusterMonitoringTLSConfiguration)
+		**out = **in
+	}
+	if in.PodMonitorMetricRelabelConfigs != nil {
+		in, out := &in.PodMonitorMetricRelabelConfigs, &out.PodMonitorMetricRelabelConfigs
+		*out = make([]monitoringv1.RelabelConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PodMonitorRelabelConfigs != nil {
+		in, out := &in.PodMonitorRelabelConfigs, &out.PodMonitorRelabelConfigs
+		*out = make([]monitoringv1.RelabelConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConfiguration.
+func (in *MonitoringConfiguration) DeepCopy() *MonitoringConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(MonitoringConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeMaintenanceWindow) DeepCopyInto(out *NodeMaintenanceWindow) {
+	*out = *in
+	if in.ReusePVC != nil {
+		in, out := &in.ReusePVC, &out.ReusePVC
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMaintenanceWindow.
+func (in *NodeMaintenanceWindow) DeepCopy() *NodeMaintenanceWindow {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeMaintenanceWindow)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnlineConfiguration) DeepCopyInto(out *OnlineConfiguration) {
+	*out = *in
+	if in.WaitForArchive != nil {
+		in, out := &in.WaitForArchive, &out.WaitForArchive
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ImmediateCheckpoint != nil {
+		in, out := &in.ImmediateCheckpoint, &out.ImmediateCheckpoint
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnlineConfiguration.
+func (in *OnlineConfiguration) DeepCopy() *OnlineConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(OnlineConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PasswordState) DeepCopyInto(out *PasswordState) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordState.
+func (in *PasswordState) DeepCopy() *PasswordState {
+	if in == nil {
+		return nil
+	}
+	out := new(PasswordState)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PgBouncerIntegrationStatus) DeepCopyInto(out *PgBouncerIntegrationStatus) {
+	*out = *in
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PgBouncerIntegrationStatus.
+func (in *PgBouncerIntegrationStatus) DeepCopy() *PgBouncerIntegrationStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PgBouncerIntegrationStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PgBouncerSecrets) DeepCopyInto(out *PgBouncerSecrets) {
+	*out = *in
+	out.AuthQuery = in.AuthQuery
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PgBouncerSecrets.
+func (in *PgBouncerSecrets) DeepCopy() *PgBouncerSecrets {
+	if in == nil {
+		return nil
+	}
+	out := new(PgBouncerSecrets)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PgBouncerSpec) DeepCopyInto(out *PgBouncerSpec) {
+	*out = *in
+	if in.AuthQuerySecret != nil {
+		in, out := &in.AuthQuerySecret, &out.AuthQuerySecret
+		*out = new(api.LocalObjectReference)
+		**out = **in
+	}
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.PgHBA != nil {
+		in, out := &in.PgHBA, &out.PgHBA
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Paused != nil {
+		in, out := &in.Paused, &out.Paused
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PgBouncerSpec.
+func (in *PgBouncerSpec) DeepCopy() *PgBouncerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PgBouncerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginConfiguration) DeepCopyInto(out *PluginConfiguration) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfiguration.
+func (in *PluginConfiguration) DeepCopy() *PluginConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PluginConfigurationList) DeepCopyInto(out *PluginConfigurationList) {
+	{
+		in := &in
+		*out = make(PluginConfigurationList, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigurationList.
+func (in PluginConfigurationList) DeepCopy() PluginConfigurationList {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginConfigurationList)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
+	*out = *in
+	if in.Capabilities != nil {
+		in, out := &in.Capabilities, &out.Capabilities
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.OperatorCapabilities != nil {
+		in, out := &in.OperatorCapabilities, &out.OperatorCapabilities
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.WALCapabilities != nil {
+		in, out := &in.WALCapabilities, &out.WALCapabilities
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.BackupCapabilities != nil {
+		in, out := &in.BackupCapabilities, &out.BackupCapabilities
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus.
+func (in *PluginStatus) DeepCopy() *PluginStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PluginStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PodTopologyLabels) DeepCopyInto(out *PodTopologyLabels) {
+	{
+		in := &in
+		*out = make(PodTopologyLabels, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologyLabels.
+func (in PodTopologyLabels) DeepCopy() PodTopologyLabels {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTopologyLabels)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pooler) DeepCopyInto(out *Pooler) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
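PluginConfigurationList and PodTopologyLabels differ from the struct types around them: as named slice and map types they take value receivers, since the header is cheap to copy, while the elements still need their own deep copies. A hypothetical miniature of the same shape (itemList and item are stand-ins, not API types):

package clone

type item struct {
	Params map[string]string
}

type itemList []item

// deepCopy clones the slice and every reference value inside each element.
func (in itemList) deepCopy() itemList {
	if in == nil {
		return nil
	}
	out := make(itemList, len(in))
	for i := range in {
		params := make(map[string]string, len(in[i].Params))
		for k, v := range in[i].Params {
			params[k] = v
		}
		out[i] = item{Params: params}
	}
	return out
}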
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pooler.
+func (in *Pooler) DeepCopy() *Pooler {
+	if in == nil {
+		return nil
+	}
+	out := new(Pooler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pooler) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerIntegrations) DeepCopyInto(out *PoolerIntegrations) {
+	*out = *in
+	in.PgBouncerIntegration.DeepCopyInto(&out.PgBouncerIntegration)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerIntegrations.
+func (in *PoolerIntegrations) DeepCopy() *PoolerIntegrations {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerIntegrations)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerList) DeepCopyInto(out *PoolerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pooler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerList.
+func (in *PoolerList) DeepCopy() *PoolerList {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PoolerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerMonitoringConfiguration) DeepCopyInto(out *PoolerMonitoringConfiguration) {
+	*out = *in
+	if in.PodMonitorMetricRelabelConfigs != nil {
+		in, out := &in.PodMonitorMetricRelabelConfigs, &out.PodMonitorMetricRelabelConfigs
+		*out = make([]monitoringv1.RelabelConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PodMonitorRelabelConfigs != nil {
+		in, out := &in.PodMonitorRelabelConfigs, &out.PodMonitorRelabelConfigs
+		*out = make([]monitoringv1.RelabelConfig, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerMonitoringConfiguration.
+func (in *PoolerMonitoringConfiguration) DeepCopy() *PoolerMonitoringConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerMonitoringConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerSecrets) DeepCopyInto(out *PoolerSecrets) {
+	*out = *in
+	out.ServerTLS = in.ServerTLS
+	out.ServerCA = in.ServerCA
+	out.ClientCA = in.ClientCA
+	if in.PgBouncerSecrets != nil {
+		in, out := &in.PgBouncerSecrets, &out.PgBouncerSecrets
+		*out = new(PgBouncerSecrets)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerSecrets.
+func (in *PoolerSecrets) DeepCopy() *PoolerSecrets {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerSecrets)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerSpec) DeepCopyInto(out *PoolerSpec) {
+	*out = *in
+	out.Cluster = in.Cluster
+	if in.Instances != nil {
+		in, out := &in.Instances, &out.Instances
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		*out = new(PodTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PgBouncer != nil {
+		in, out := &in.PgBouncer, &out.PgBouncer
+		*out = new(PgBouncerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DeploymentStrategy != nil {
+		in, out := &in.DeploymentStrategy, &out.DeploymentStrategy
+		*out = new(appsv1.DeploymentStrategy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Monitoring != nil {
+		in, out := &in.Monitoring, &out.Monitoring
+		*out = new(PoolerMonitoringConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceTemplate != nil {
+		in, out := &in.ServiceTemplate, &out.ServiceTemplate
+		*out = new(ServiceTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerSpec.
+func (in *PoolerSpec) DeepCopy() *PoolerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolerStatus) DeepCopyInto(out *PoolerStatus) {
+	*out = *in
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = new(PoolerSecrets)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolerStatus.
+func (in *PoolerStatus) DeepCopy() *PoolerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PoolerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PostgresConfiguration) DeepCopyInto(out *PostgresConfiguration) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Synchronous != nil {
+		in, out := &in.Synchronous, &out.Synchronous
+		*out = new(SynchronousReplicaConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PgHBA != nil {
+		in, out := &in.PgHBA, &out.PgHBA
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.PgIdent != nil {
+		in, out := &in.PgIdent, &out.PgIdent
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.SyncReplicaElectionConstraint.DeepCopyInto(&out.SyncReplicaElectionConstraint)
+	if in.AdditionalLibraries != nil {
+		in, out := &in.AdditionalLibraries, &out.AdditionalLibraries
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.LDAP != nil {
+		in, out := &in.LDAP, &out.LDAP
+		*out = new(LDAPConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresConfiguration.
+func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(PostgresConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecoveryTarget) DeepCopyInto(out *RecoveryTarget) {
+	*out = *in
+	if in.TargetImmediate != nil {
+		in, out := &in.TargetImmediate, &out.TargetImmediate
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Exclusive != nil {
+		in, out := &in.Exclusive, &out.Exclusive
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecoveryTarget.
+func (in *RecoveryTarget) DeepCopy() *RecoveryTarget {
+	if in == nil {
+		return nil
+	}
+	out := new(RecoveryTarget)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicaClusterConfiguration) DeepCopyInto(out *ReplicaClusterConfiguration) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.MinApplyDelay != nil {
+		in, out := &in.MinApplyDelay, &out.MinApplyDelay
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaClusterConfiguration.
+func (in *ReplicaClusterConfiguration) DeepCopy() *ReplicaClusterConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicaClusterConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationSlotsConfiguration) DeepCopyInto(out *ReplicationSlotsConfiguration) {
+	*out = *in
+	if in.HighAvailability != nil {
+		in, out := &in.HighAvailability, &out.HighAvailability
+		*out = new(ReplicationSlotsHAConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SynchronizeReplicas != nil {
+		in, out := &in.SynchronizeReplicas, &out.SynchronizeReplicas
+		*out = new(SynchronizeReplicasConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationSlotsConfiguration.
+func (in *ReplicationSlotsConfiguration) DeepCopy() *ReplicationSlotsConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationSlotsConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationSlotsHAConfiguration) DeepCopyInto(out *ReplicationSlotsHAConfiguration) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationSlotsHAConfiguration.
+func (in *ReplicationSlotsHAConfiguration) DeepCopy() *ReplicationSlotsHAConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationSlotsHAConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleConfiguration) DeepCopyInto(out *RoleConfiguration) {
+	*out = *in
+	if in.PasswordSecret != nil {
+		in, out := &in.PasswordSecret, &out.PasswordSecret
+		*out = new(api.LocalObjectReference)
+		**out = **in
+	}
+	if in.ValidUntil != nil {
+		in, out := &in.ValidUntil, &out.ValidUntil
+		*out = (*in).DeepCopy()
+	}
+	if in.InRoles != nil {
+		in, out := &in.InRoles, &out.InRoles
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Inherit != nil {
+		in, out := &in.Inherit, &out.Inherit
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleConfiguration.
+func (in *RoleConfiguration) DeepCopy() *RoleConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(RoleConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdateStatus) DeepCopyInto(out *RollingUpdateStatus) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatus.
+func (in *RollingUpdateStatus) DeepCopy() *RollingUpdateStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(RollingUpdateStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SQLRefs) DeepCopyInto(out *SQLRefs) {
+	*out = *in
+	if in.SecretRefs != nil {
+		in, out := &in.SecretRefs, &out.SecretRefs
+		*out = make([]api.SecretKeySelector, len(*in))
+		copy(*out, *in)
+	}
+	if in.ConfigMapRefs != nil {
+		in, out := &in.ConfigMapRefs, &out.ConfigMapRefs
+		*out = make([]api.ConfigMapKeySelector, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLRefs.
+func (in *SQLRefs) DeepCopy() *SQLRefs {
+	if in == nil {
+		return nil
+	}
+	out := new(SQLRefs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledBackup) DeepCopyInto(out *ScheduledBackup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledBackup.
+func (in *ScheduledBackup) DeepCopy() *ScheduledBackup {
+	if in == nil {
+		return nil
+	}
+	out := new(ScheduledBackup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ScheduledBackup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledBackupList) DeepCopyInto(out *ScheduledBackupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ScheduledBackup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledBackupList.
+func (in *ScheduledBackupList) DeepCopy() *ScheduledBackupList {
+	if in == nil {
+		return nil
+	}
+	out := new(ScheduledBackupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ScheduledBackupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledBackupSpec) DeepCopyInto(out *ScheduledBackupSpec) {
+	*out = *in
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Immediate != nil {
+		in, out := &in.Immediate, &out.Immediate
+		*out = new(bool)
+		**out = **in
+	}
+	out.Cluster = in.Cluster
+	if in.PluginConfiguration != nil {
+		in, out := &in.PluginConfiguration, &out.PluginConfiguration
+		*out = new(BackupPluginConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Online != nil {
+		in, out := &in.Online, &out.Online
+		*out = new(bool)
+		**out = **in
+	}
+	if in.OnlineConfiguration != nil {
+		in, out := &in.OnlineConfiguration, &out.OnlineConfiguration
+		*out = new(OnlineConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledBackupSpec.
+func (in *ScheduledBackupSpec) DeepCopy() *ScheduledBackupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScheduledBackupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledBackupStatus) DeepCopyInto(out *ScheduledBackupStatus) {
+	*out = *in
+	if in.LastCheckTime != nil {
+		in, out := &in.LastCheckTime, &out.LastCheckTime
+		*out = (*in).DeepCopy()
+	}
+	if in.LastScheduleTime != nil {
+		in, out := &in.LastScheduleTime, &out.LastScheduleTime
+		*out = (*in).DeepCopy()
+	}
+	if in.NextScheduleTime != nil {
+		in, out := &in.NextScheduleTime, &out.NextScheduleTime
+		*out = (*in).DeepCopy()
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledBackupStatus.
+func (in *ScheduledBackupStatus) DeepCopy() *ScheduledBackupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScheduledBackupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretVersion) DeepCopyInto(out *SecretVersion) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVersion.
+func (in *SecretVersion) DeepCopy() *SecretVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretVersion)
+	in.DeepCopyInto(out)
+	return out
+}
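These helpers exist mostly for one consumer-side rule: objects read from an informer cache are shared, so a controller must mutate a DeepCopy and send that to the API server. A sketch of the idiom, assuming a controller-runtime client; suspendBackup is our illustrative name, not code from this repository:

package controller

import (
	"context"

	v1 "github.com/cloudnative-pg/cloudnative-pg/pkg/api/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// suspendBackup flips Spec.Suspend on a copy, leaving the cached object untouched.
func suspendBackup(ctx context.Context, c client.Client, in *v1.ScheduledBackup) error {
	out := in.DeepCopy() // never mutate the cached object directly
	suspend := true
	out.Spec.Suspend = &suspend
	return c.Update(ctx, out)
}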
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretsResourceVersion) DeepCopyInto(out *SecretsResourceVersion) {
+	*out = *in
+	if in.ManagedRoleSecretVersions != nil {
+		in, out := &in.ManagedRoleSecretVersions, &out.ManagedRoleSecretVersions
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ExternalClusterSecretVersions != nil {
+		in, out := &in.ExternalClusterSecretVersions, &out.ExternalClusterSecretVersions
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Metrics != nil {
+		in, out := &in.Metrics, &out.Metrics
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsResourceVersion.
+func (in *SecretsResourceVersion) DeepCopy() *SecretsResourceVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretsResourceVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountTemplate) DeepCopyInto(out *ServiceAccountTemplate) {
+	*out = *in
+	in.Metadata.DeepCopyInto(&out.Metadata)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTemplate.
+func (in *ServiceAccountTemplate) DeepCopy() *ServiceAccountTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceTemplateSpec) DeepCopyInto(out *ServiceTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceTemplateSpec.
+func (in *ServiceTemplateSpec) DeepCopy() *ServiceTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageConfiguration) DeepCopyInto(out *StorageConfiguration) {
+	*out = *in
+	if in.StorageClass != nil {
+		in, out := &in.StorageClass, &out.StorageClass
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResizeInUseVolumes != nil {
+		in, out := &in.ResizeInUseVolumes, &out.ResizeInUseVolumes
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PersistentVolumeClaimTemplate != nil {
+		in, out := &in.PersistentVolumeClaimTemplate, &out.PersistentVolumeClaimTemplate
+		*out = new(corev1.PersistentVolumeClaimSpec)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfiguration.
+func (in *StorageConfiguration) DeepCopy() *StorageConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SwitchReplicaClusterStatus) DeepCopyInto(out *SwitchReplicaClusterStatus) {
+	*out = *in
+}
+func (in *SwitchReplicaClusterStatus) DeepCopy() *SwitchReplicaClusterStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SwitchReplicaClusterStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SyncReplicaElectionConstraints) DeepCopyInto(out *SyncReplicaElectionConstraints) {
+	*out = *in
+	if in.NodeLabelsAntiAffinity != nil {
+		in, out := &in.NodeLabelsAntiAffinity, &out.NodeLabelsAntiAffinity
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncReplicaElectionConstraints.
+func (in *SyncReplicaElectionConstraints) DeepCopy() *SyncReplicaElectionConstraints {
+	if in == nil {
+		return nil
+	}
+	out := new(SyncReplicaElectionConstraints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SynchronizeReplicasConfiguration) DeepCopyInto(out *SynchronizeReplicasConfiguration) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ExcludePatterns != nil {
+		in, out := &in.ExcludePatterns, &out.ExcludePatterns
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.synchronizeReplicasCache.DeepCopyInto(&out.synchronizeReplicasCache)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronizeReplicasConfiguration.
+func (in *SynchronizeReplicasConfiguration) DeepCopy() *SynchronizeReplicasConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(SynchronizeReplicasConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SynchronousReplicaConfiguration) DeepCopyInto(out *SynchronousReplicaConfiguration) {
+	*out = *in
+	if in.MaxStandbyNamesFromCluster != nil {
+		in, out := &in.MaxStandbyNamesFromCluster, &out.MaxStandbyNamesFromCluster
+		*out = new(int)
+		**out = **in
+	}
+	if in.StandbyNamesPre != nil {
+		in, out := &in.StandbyNamesPre, &out.StandbyNamesPre
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.StandbyNamesPost != nil {
+		in, out := &in.StandbyNamesPost, &out.StandbyNamesPost
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronousReplicaConfiguration.
+func (in *SynchronousReplicaConfiguration) DeepCopy() *SynchronousReplicaConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(SynchronousReplicaConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TablespaceConfiguration) DeepCopyInto(out *TablespaceConfiguration) {
+	*out = *in
+	in.Storage.DeepCopyInto(&out.Storage)
+	out.Owner = in.Owner
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablespaceConfiguration.
+func (in *TablespaceConfiguration) DeepCopy() *TablespaceConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TablespaceConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TablespaceState) DeepCopyInto(out *TablespaceState) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TablespaceState.
+func (in *TablespaceState) DeepCopy() *TablespaceState {
+	if in == nil {
+		return nil
+	}
+	out := new(TablespaceState)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Topology) DeepCopyInto(out *Topology) {
+	*out = *in
+	if in.Instances != nil {
+		in, out := &in.Instances, &out.Instances
+		*out = make(map[PodName]PodTopologyLabels, len(*in))
+		for key, val := range *in {
+			var outVal map[string]string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				inVal := (*in)[key]
+				in, out := &inVal, &outVal
+				*out = make(PodTopologyLabels, len(*in))
+				for key, val := range *in {
+					(*out)[key] = val
+				}
+			}
+			(*out)[key] = outVal
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology.
+func (in *Topology) DeepCopy() *Topology {
+	if in == nil {
+		return nil
+	}
+	out := new(Topology)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeSnapshotConfiguration) DeepCopyInto(out *VolumeSnapshotConfiguration) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.TablespaceClassName != nil {
+		in, out := &in.TablespaceClassName, &out.TablespaceClassName
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Online != nil {
+		in, out := &in.Online, &out.Online
+		*out = new(bool)
+		**out = **in
+	}
+	in.OnlineConfiguration.DeepCopyInto(&out.OnlineConfiguration)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotConfiguration.
+func (in *VolumeSnapshotConfiguration) DeepCopy() *VolumeSnapshotConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeSnapshotConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
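
These generated helpers follow the standard Kubernetes deepcopy contract: DeepCopyInto copies the receiver into an existing value, DeepCopy allocates and returns a fresh one, and DeepCopyObject satisfies runtime.Object so the types can flow through client-go and controller-runtime machinery. A minimal usage sketch follows; it is not part of the patch, and it assumes the types compile under the module path declared in go.mod (only the Suspend field, visible in the generated code above, is used):

    package main

    import (
    	"fmt"

    	apiv1 "github.com/cloudnative-pg/cloudnative-pg/pkg/api/v1"
    )

    func main() {
    	suspend := true
    	original := &apiv1.ScheduledBackupSpec{Suspend: &suspend}

    	// DeepCopy clones the pointer field, so mutating the clone
    	// leaves the original untouched.
    	clone := original.DeepCopy()
    	*clone.Suspend = false

    	fmt.Println(*original.Suspend, *clone.Suspend) // true false
    }

The independence of the two Suspend pointers is exactly what the generated `*out = new(bool); **out = **in` pattern guarantees; a plain struct assignment would have shared the pointer instead.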