diff --git a/apis/redis/v1alpha1/doc.go b/apis/redis/v1alpha1/doc.go new file mode 100644 index 0000000000..e6ae4a0c5c --- /dev/null +++ b/apis/redis/v1alpha1/doc.go @@ -0,0 +1,16 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +kcc:proto=google.cloud.redis.cluster.v1 +package v1alpha1 diff --git a/apis/redis/v1alpha1/groupversion_info.go b/apis/redis/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..09063cd6ee --- /dev/null +++ b/apis/redis/v1alpha1/groupversion_info.go @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +kubebuilder:object:generate=true +// +groupName=redis.cnrm.cloud.google.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "redis.cnrm.cloud.google.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/redis/v1alpha1/rediscluster_types.go b/apis/redis/v1alpha1/rediscluster_types.go new file mode 100644 index 0000000000..0eacc93c37 --- /dev/null +++ b/apis/redis/v1alpha1/rediscluster_types.go @@ -0,0 +1,167 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha1 + +import ( + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/k8s/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var RedisClusterGVK = GroupVersion.WithKind("RedisCluster") + +// RedisClusterSpec defines the desired state of RedisCluster +// +kcc:proto=google.cloud.redis.cluster.v1.Cluster +type RedisClusterSpec struct { + // The RedisCluster name. If not given, the metadata.name will be used. + // + optional + ResourceID *string `json:"resourceID,omitempty"` + + /* NOTYET + // Required. Unique name of the resource in this scope including project and + // location using the form: + // `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` + Name *string `json:"name,omitempty"` + */ + + // Optional. The authorization mode of the Redis cluster. + // If not provided, auth feature is disabled for the cluster. + AuthorizationMode *string `json:"authorizationMode,omitempty"` + + // Optional. The in-transit encryption for the Redis cluster. + // If not provided, encryption is disabled for the cluster. + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` + + // Required. Number of shards for the Redis cluster. + ShardCount *int32 `json:"shardCount,omitempty"` + + // Required. Each PscConfig configures the consumer network where IPs will + // be designated to the cluster for client access through Private Service + // Connect Automation. Currently, only one PscConfig is supported. + PscConfigs []PscConfig `json:"pscConfigs,omitempty"` + + // Optional. The type of a redis node in the cluster. NodeType determines the + // underlying machine-type of a redis node. + NodeType *string `json:"nodeType,omitempty"` + + // Optional. Persistence config (RDB, AOF) for the cluster. + PersistenceConfig *ClusterPersistenceConfig `json:"persistenceConfig,omitempty"` + + // Optional. Key/Value pairs of customer overrides for mutable Redis Configs + RedisConfigs map[string]string `json:"redisConfigs,omitempty"` + + // Optional. The number of replica nodes per shard. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + // Optional. This config will be used to determine how the customer wants us + // to distribute cluster resources within the region. + ZoneDistributionConfig *ZoneDistributionConfig `json:"zoneDistributionConfig,omitempty"` + + // Optional. The delete operation will fail when the value is set to true. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.PscConfig +type PscConfig struct { + // Required. The network where the IP address of the discovery endpoint will + // be reserved, in the form of + // projects/{network_project}/global/networks/{network_id}. + NetworkRef *refs.ComputeNetworkRef `json:"networkRef,omitempty"` +} + +// RedisClusterStatus defines the config connector machine state of RedisCluster +type RedisClusterStatus struct { + /* Conditions represent the latest available observations of the + object's current state. */ + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` + + // ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. 
+    // +optional
+    ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+
+    // A unique specifier for the RedisCluster resource in GCP.
+    // +optional
+    ExternalRef *string `json:"externalRef,omitempty"`
+
+    // ObservedState is the state of the resource as most recently observed in GCP.
+    // +optional
+    ObservedState *RedisClusterObservedState `json:"observedState,omitempty"`
+}
+
+// RedisClusterObservedState is the state of the RedisCluster resource as most recently observed in GCP.
+// +kcc:proto=google.cloud.redis.cluster.v1.Cluster
+type RedisClusterObservedState struct {
+
+    // Output only. The timestamp associated with the cluster creation request.
+    CreateTime *string `json:"createTime,omitempty"`
+
+    // Output only. The current state of this cluster.
+    // Can be CREATING, READY, UPDATING, DELETING and SUSPENDED
+    State *string `json:"state,omitempty"`
+
+    // Output only. System assigned, unique identifier for the cluster.
+    Uid *string `json:"uid,omitempty"`
+
+    // Output only. Redis memory size in GB for the entire cluster rounded up to
+    // the next integer.
+    SizeGb *int32 `json:"sizeGb,omitempty"`
+
+    // Output only. Endpoints created on each given network, for Redis clients to
+    // connect to the cluster. Currently only one discovery endpoint is supported.
+    DiscoveryEndpoints []DiscoveryEndpoint `json:"discoveryEndpoints,omitempty"`
+
+    // Output only. PSC connections for discovery of the cluster topology and
+    // accessing the cluster.
+    PscConnections []PscConnection `json:"pscConnections,omitempty"`
+
+    // Output only. Additional information about the current state of the cluster.
+    StateInfo *Cluster_StateInfo `json:"stateInfo,omitempty"`
+
+    // Output only. Precise value of redis memory size in GB for the entire
+    // cluster.
+    PreciseSizeGb *float64 `json:"preciseSizeGb,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories=gcp
+// +kubebuilder:subresource:status
+// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/system=true"
+// +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date"
+// +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded"
+// +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'"
+// +kubebuilder:printcolumn:name="Status Age",JSONPath=".status.conditions[?(@.type=='Ready')].lastTransitionTime",type="date",description="The last transition time for the value in 'Status'"
+
+// RedisCluster is the Schema for the RedisCluster API
+// +k8s:openapi-gen=true
+type RedisCluster struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec   RedisClusterSpec   `json:"spec,omitempty"`
+    Status RedisClusterStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// RedisClusterList contains a list of RedisCluster
+type RedisClusterList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []RedisCluster `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&RedisCluster{}, &RedisClusterList{})
+}
diff --git a/apis/redis/v1alpha1/types.generated.go b/apis/redis/v1alpha1/types.generated.go
new file mode 100644
index 0000000000..32277747ed
--- /dev/null
+++ 
b/apis/redis/v1alpha1/types.generated.go @@ -0,0 +1,205 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +// +kcc:proto=google.cloud.redis.cluster.v1.CertificateAuthority +type CertificateAuthority struct { + ManagedServerCa *CertificateAuthority_ManagedCertificateAuthority `json:"managedServerCa,omitempty"` + + // Identifier. Unique name of the resource in this scope including project, + // location and cluster using the form: + // `projects/{project}/locations/{location}/clusters/{cluster}/certificateAuthority` + Name *string `json:"name,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.CertificateAuthority.ManagedCertificateAuthority +type CertificateAuthority_ManagedCertificateAuthority struct { + // The PEM encoded CA certificate chains for redis managed + // server authentication + CaCerts []CertificateAuthority_ManagedCertificateAuthority_CertChain `json:"caCerts,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.CertificateAuthority.ManagedCertificateAuthority.CertChain +type CertificateAuthority_ManagedCertificateAuthority_CertChain struct { + // The certificates that form the CA chain, from leaf to root order. + Certificates []string `json:"certificates,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.Cluster +type Cluster struct { + // Required. Unique name of the resource in this scope including project and + // location using the form: + // `projects/{project_id}/locations/{location_id}/clusters/{cluster_id}` + Name *string `json:"name,omitempty"` + + // Output only. The timestamp associated with the cluster creation request. + CreateTime *string `json:"createTime,omitempty"` + + // Output only. The current state of this cluster. + // Can be CREATING, READY, UPDATING, DELETING and SUSPENDED + State *string `json:"state,omitempty"` + + // Output only. System assigned, unique identifier for the cluster. + Uid *string `json:"uid,omitempty"` + + // Optional. The number of replica nodes per shard. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + // Optional. The authorization mode of the Redis cluster. + // If not provided, auth feature is disabled for the cluster. + AuthorizationMode *string `json:"authorizationMode,omitempty"` + + // Optional. The in-transit encryption for the Redis cluster. + // If not provided, encryption is disabled for the cluster. + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` + + // Output only. Redis memory size in GB for the entire cluster rounded up to + // the next integer. + SizeGb *int32 `json:"sizeGb,omitempty"` + + // Required. Number of shards for the Redis cluster. + ShardCount *int32 `json:"shardCount,omitempty"` + + // Required. Each PscConfig configures the consumer network where IPs will + // be designated to the cluster for client access through Private Service + // Connect Automation. Currently, only one PscConfig is supported. + PscConfigs []PscConfig `json:"pscConfigs,omitempty"` + + // Output only. 
Endpoints created on each given network, for Redis clients to + // connect to the cluster. Currently only one discovery endpoint is supported. + DiscoveryEndpoints []DiscoveryEndpoint `json:"discoveryEndpoints,omitempty"` + + // Output only. PSC connections for discovery of the cluster topology and + // accessing the cluster. + PscConnections []PscConnection `json:"pscConnections,omitempty"` + + // Output only. Additional information about the current state of the cluster. + StateInfo *Cluster_StateInfo `json:"stateInfo,omitempty"` + + // Optional. The type of a redis node in the cluster. NodeType determines the + // underlying machine-type of a redis node. + NodeType *string `json:"nodeType,omitempty"` + + // Optional. Persistence config (RDB, AOF) for the cluster. + PersistenceConfig *ClusterPersistenceConfig `json:"persistenceConfig,omitempty"` + + // Optional. Key/Value pairs of customer overrides for mutable Redis Configs + RedisConfigs map[string]string `json:"redisConfigs,omitempty"` + + // Output only. Precise value of redis memory size in GB for the entire + // cluster. + PreciseSizeGb *float64 `json:"preciseSizeGb,omitempty"` + + // Optional. This config will be used to determine how the customer wants us + // to distribute cluster resources within the region. + ZoneDistributionConfig *ZoneDistributionConfig `json:"zoneDistributionConfig,omitempty"` + + // Optional. The delete operation will fail when the value is set to true. + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.Cluster.StateInfo +type Cluster_StateInfo struct { + // Describes ongoing update on the cluster when cluster state is UPDATING. + UpdateInfo *Cluster_StateInfo_UpdateInfo `json:"updateInfo,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.Cluster.StateInfo.UpdateInfo +type Cluster_StateInfo_UpdateInfo struct { + // Target number of shards for redis cluster + TargetShardCount *int32 `json:"targetShardCount,omitempty"` + + // Target number of replica nodes per shard. + TargetReplicaCount *int32 `json:"targetReplicaCount,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.ClusterPersistenceConfig +type ClusterPersistenceConfig struct { + // Optional. The mode of persistence. + Mode *string `json:"mode,omitempty"` + + // Optional. RDB configuration. This field will be ignored if mode is not RDB. + RdbConfig *ClusterPersistenceConfig_RDBConfig `json:"rdbConfig,omitempty"` + + // Optional. AOF configuration. This field will be ignored if mode is not AOF. + AofConfig *ClusterPersistenceConfig_AOFConfig `json:"aofConfig,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.ClusterPersistenceConfig.RDBConfig +type ClusterPersistenceConfig_RDBConfig struct { + // Optional. Period between RDB snapshots. + RdbSnapshotPeriod *string `json:"rdbSnapshotPeriod,omitempty"` + + // Optional. The time that the first snapshot was/will be attempted, and to + // which future snapshots will be aligned. If not provided, the current time + // will be used. + RdbSnapshotStartTime *string `json:"rdbSnapshotStartTime,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.ClusterPersistenceConfig.AOFConfig +type ClusterPersistenceConfig_AOFConfig struct { + // Optional. fsync configuration. + AppendFsync *string `json:"appendFsync,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.DiscoveryEndpoint +type DiscoveryEndpoint struct { + // Output only. 
Address of the exposed Redis endpoint used by clients to + // connect to the service. The address could be either IP or hostname. + Address *string `json:"address,omitempty"` + + // Output only. The port number of the exposed Redis endpoint. + Port *int32 `json:"port,omitempty"` + + // Output only. Customer configuration for where the endpoint is created and + // accessed from. + PscConfig *PscConfig `json:"pscConfig,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.PscConnection +type PscConnection struct { + // Output only. The PSC connection id of the forwarding rule connected to the + // service attachment. + PscConnectionID *string `json:"pscConnectionID,omitempty"` + + // Output only. The IP allocated on the consumer network for the PSC + // forwarding rule. + Address *string `json:"address,omitempty"` + + // Output only. The URI of the consumer side forwarding rule. + // Example: + // projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. + ForwardingRule *string `json:"forwardingRule,omitempty"` + + // Output only. The consumer project_id where the forwarding rule is created + // from. + ProjectID *string `json:"projectID,omitempty"` + + // The consumer network where the IP address resides, in the form of + // projects/{project_id}/global/networks/{network_id}. + Network *string `json:"network,omitempty"` +} + +// +kcc:proto=google.cloud.redis.cluster.v1.ZoneDistributionConfig +type ZoneDistributionConfig struct { + // Optional. The mode of zone distribution. Defaults to MULTI_ZONE, when not + // specified. + Mode *string `json:"mode,omitempty"` + + // Optional. When SINGLE ZONE distribution is selected, zone field would be + // used to allocate all resources in that zone. This is not applicable to + // MULTI_ZONE, and would be ignored for MULTI_ZONE clusters. + Zone *string `json:"zone,omitempty"` +} diff --git a/apis/redis/v1alpha1/zz_generated.deepcopy.go b/apis/redis/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e4ffe6ff22 --- /dev/null +++ b/apis/redis/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,658 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + k8sv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/apis/k8s/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthority) DeepCopyInto(out *CertificateAuthority) { + *out = *in + if in.ManagedServerCa != nil { + in, out := &in.ManagedServerCa, &out.ManagedServerCa + *out = new(CertificateAuthority_ManagedCertificateAuthority) + (*in).DeepCopyInto(*out) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthority. +func (in *CertificateAuthority) DeepCopy() *CertificateAuthority { + if in == nil { + return nil + } + out := new(CertificateAuthority) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateAuthority_ManagedCertificateAuthority) DeepCopyInto(out *CertificateAuthority_ManagedCertificateAuthority) { + *out = *in + if in.CaCerts != nil { + in, out := &in.CaCerts, &out.CaCerts + *out = make([]CertificateAuthority_ManagedCertificateAuthority_CertChain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthority_ManagedCertificateAuthority. +func (in *CertificateAuthority_ManagedCertificateAuthority) DeepCopy() *CertificateAuthority_ManagedCertificateAuthority { + if in == nil { + return nil + } + out := new(CertificateAuthority_ManagedCertificateAuthority) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateAuthority_ManagedCertificateAuthority_CertChain) DeepCopyInto(out *CertificateAuthority_ManagedCertificateAuthority_CertChain) { + *out = *in + if in.Certificates != nil { + in, out := &in.Certificates, &out.Certificates + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateAuthority_ManagedCertificateAuthority_CertChain. +func (in *CertificateAuthority_ManagedCertificateAuthority_CertChain) DeepCopy() *CertificateAuthority_ManagedCertificateAuthority_CertChain { + if in == nil { + return nil + } + out := new(CertificateAuthority_ManagedCertificateAuthority_CertChain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Uid != nil { + in, out := &in.Uid, &out.Uid + *out = new(string) + **out = **in + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + if in.AuthorizationMode != nil { + in, out := &in.AuthorizationMode, &out.AuthorizationMode + *out = new(string) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(int32) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(int32) + **out = **in + } + if in.PscConfigs != nil { + in, out := &in.PscConfigs, &out.PscConfigs + *out = make([]PscConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiscoveryEndpoints != nil { + in, out := &in.DiscoveryEndpoints, &out.DiscoveryEndpoints + *out = make([]DiscoveryEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PscConnections != nil { + in, out := &in.PscConnections, &out.PscConnections + *out = make([]PscConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StateInfo != nil { + in, out := &in.StateInfo, &out.StateInfo + *out = new(Cluster_StateInfo) + (*in).DeepCopyInto(*out) + } + if in.NodeType != nil { + 
in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.PersistenceConfig != nil { + in, out := &in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfig) + (*in).DeepCopyInto(*out) + } + if in.RedisConfigs != nil { + in, out := &in.RedisConfigs, &out.RedisConfigs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PreciseSizeGb != nil { + in, out := &in.PreciseSizeGb, &out.PreciseSizeGb + *out = new(float64) + **out = **in + } + if in.ZoneDistributionConfig != nil { + in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig + *out = new(ZoneDistributionConfig) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfig) DeepCopyInto(out *ClusterPersistenceConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RdbConfig != nil { + in, out := &in.RdbConfig, &out.RdbConfig + *out = new(ClusterPersistenceConfig_RDBConfig) + (*in).DeepCopyInto(*out) + } + if in.AofConfig != nil { + in, out := &in.AofConfig, &out.AofConfig + *out = new(ClusterPersistenceConfig_AOFConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfig. +func (in *ClusterPersistenceConfig) DeepCopy() *ClusterPersistenceConfig { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfig_AOFConfig) DeepCopyInto(out *ClusterPersistenceConfig_AOFConfig) { + *out = *in + if in.AppendFsync != nil { + in, out := &in.AppendFsync, &out.AppendFsync + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfig_AOFConfig. +func (in *ClusterPersistenceConfig_AOFConfig) DeepCopy() *ClusterPersistenceConfig_AOFConfig { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfig_AOFConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfig_RDBConfig) DeepCopyInto(out *ClusterPersistenceConfig_RDBConfig) { + *out = *in + if in.RdbSnapshotPeriod != nil { + in, out := &in.RdbSnapshotPeriod, &out.RdbSnapshotPeriod + *out = new(string) + **out = **in + } + if in.RdbSnapshotStartTime != nil { + in, out := &in.RdbSnapshotStartTime, &out.RdbSnapshotStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfig_RDBConfig. 
+func (in *ClusterPersistenceConfig_RDBConfig) DeepCopy() *ClusterPersistenceConfig_RDBConfig { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfig_RDBConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_StateInfo) DeepCopyInto(out *Cluster_StateInfo) { + *out = *in + if in.UpdateInfo != nil { + in, out := &in.UpdateInfo, &out.UpdateInfo + *out = new(Cluster_StateInfo_UpdateInfo) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_StateInfo. +func (in *Cluster_StateInfo) DeepCopy() *Cluster_StateInfo { + if in == nil { + return nil + } + out := new(Cluster_StateInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster_StateInfo_UpdateInfo) DeepCopyInto(out *Cluster_StateInfo_UpdateInfo) { + *out = *in + if in.TargetShardCount != nil { + in, out := &in.TargetShardCount, &out.TargetShardCount + *out = new(int32) + **out = **in + } + if in.TargetReplicaCount != nil { + in, out := &in.TargetReplicaCount, &out.TargetReplicaCount + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster_StateInfo_UpdateInfo. +func (in *Cluster_StateInfo_UpdateInfo) DeepCopy() *Cluster_StateInfo_UpdateInfo { + if in == nil { + return nil + } + out := new(Cluster_StateInfo_UpdateInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiscoveryEndpoint) DeepCopyInto(out *DiscoveryEndpoint) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.PscConfig != nil { + in, out := &in.PscConfig, &out.PscConfig + *out = new(PscConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiscoveryEndpoint. +func (in *DiscoveryEndpoint) DeepCopy() *DiscoveryEndpoint { + if in == nil { + return nil + } + out := new(DiscoveryEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PscConfig) DeepCopyInto(out *PscConfig) { + *out = *in + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(v1beta1.ComputeNetworkRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscConfig. +func (in *PscConfig) DeepCopy() *PscConfig { + if in == nil { + return nil + } + out := new(PscConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PscConnection) DeepCopyInto(out *PscConnection) { + *out = *in + if in.PscConnectionID != nil { + in, out := &in.PscConnectionID, &out.PscConnectionID + *out = new(string) + **out = **in + } + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.ForwardingRule != nil { + in, out := &in.ForwardingRule, &out.ForwardingRule + *out = new(string) + **out = **in + } + if in.ProjectID != nil { + in, out := &in.ProjectID, &out.ProjectID + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PscConnection. +func (in *PscConnection) DeepCopy() *PscConnection { + if in == nil { + return nil + } + out := new(PscConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCluster) DeepCopyInto(out *RedisCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCluster. +func (in *RedisCluster) DeepCopy() *RedisCluster { + if in == nil { + return nil + } + out := new(RedisCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterList) DeepCopyInto(out *RedisClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterList. +func (in *RedisClusterList) DeepCopy() *RedisClusterList { + if in == nil { + return nil + } + out := new(RedisClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisClusterObservedState) DeepCopyInto(out *RedisClusterObservedState) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Uid != nil { + in, out := &in.Uid, &out.Uid + *out = new(string) + **out = **in + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(int32) + **out = **in + } + if in.DiscoveryEndpoints != nil { + in, out := &in.DiscoveryEndpoints, &out.DiscoveryEndpoints + *out = make([]DiscoveryEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PscConnections != nil { + in, out := &in.PscConnections, &out.PscConnections + *out = make([]PscConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StateInfo != nil { + in, out := &in.StateInfo, &out.StateInfo + *out = new(Cluster_StateInfo) + (*in).DeepCopyInto(*out) + } + if in.PreciseSizeGb != nil { + in, out := &in.PreciseSizeGb, &out.PreciseSizeGb + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterObservedState. +func (in *RedisClusterObservedState) DeepCopy() *RedisClusterObservedState { + if in == nil { + return nil + } + out := new(RedisClusterObservedState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterSpec) DeepCopyInto(out *RedisClusterSpec) { + *out = *in + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.AuthorizationMode != nil { + in, out := &in.AuthorizationMode, &out.AuthorizationMode + *out = new(string) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(int32) + **out = **in + } + if in.PscConfigs != nil { + in, out := &in.PscConfigs, &out.PscConfigs + *out = make([]PscConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.PersistenceConfig != nil { + in, out := &in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfig) + (*in).DeepCopyInto(*out) + } + if in.RedisConfigs != nil { + in, out := &in.RedisConfigs, &out.RedisConfigs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + if in.ZoneDistributionConfig != nil { + in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig + *out = new(ZoneDistributionConfig) + (*in).DeepCopyInto(*out) + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterSpec. 
+func (in *RedisClusterSpec) DeepCopy() *RedisClusterSpec { + if in == nil { + return nil + } + out := new(RedisClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterStatus) DeepCopyInto(out *RedisClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]k8sv1alpha1.Condition, len(*in)) + copy(*out, *in) + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ExternalRef != nil { + in, out := &in.ExternalRef, &out.ExternalRef + *out = new(string) + **out = **in + } + if in.ObservedState != nil { + in, out := &in.ObservedState, &out.ObservedState + *out = new(RedisClusterObservedState) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterStatus. +func (in *RedisClusterStatus) DeepCopy() *RedisClusterStatus { + if in == nil { + return nil + } + out := new(RedisClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneDistributionConfig) DeepCopyInto(out *ZoneDistributionConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneDistributionConfig. +func (in *ZoneDistributionConfig) DeepCopy() *ZoneDistributionConfig { + if in == nil { + return nil + } + out := new(ZoneDistributionConfig) + in.DeepCopyInto(out) + return out +} diff --git a/dev/tools/controllerbuilder/generate.sh b/dev/tools/controllerbuilder/generate.sh new file mode 100755 index 0000000000..1026450a1d --- /dev/null +++ b/dev/tools/controllerbuilder/generate.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +set -e +set -x + +# RedisCluster + +go run . generate-types \ + --proto-source-path ../proto-to-mapper/build/googleapis.pb \ + --service google.cloud.redis.cluster.v1 \ + --version redis.cnrm.cloud.google.com/v1alpha1 \ + --output-api ~/kcc/k8s-config-connector/apis/ \ + --kinds RedisCluster + +go run . 
generate-mapper \ + --proto-source-path ../proto-to-mapper/build/googleapis.pb \ + --service google.cloud.redis.cluster.v1 \ + --api-go-package-path github.com/GoogleCloudPlatform/k8s-config-connector/apis \ + --output-dir ~/kcc/k8s-config-connector/pkg/controller/direct/ \ + --api-dir ~/kcc/k8s-config-connector/apis/ diff --git a/dev/tools/controllerbuilder/pkg/codegen/mappergenerator.go b/dev/tools/controllerbuilder/pkg/codegen/mappergenerator.go index b9ec60ad2a..8399cd4bf9 100644 --- a/dev/tools/controllerbuilder/pkg/codegen/mappergenerator.go +++ b/dev/tools/controllerbuilder/pkg/codegen/mappergenerator.go @@ -174,8 +174,11 @@ func (v *MapperGenerator) GenerateMappers() error { krmPackage := pair.KRMType.GoPackage out.contents.WriteString(fmt.Sprintf("package %s\n\n", lastGoComponent(goPackage))) - out.contents.WriteString(fmt.Sprintf("import pb %q\n\n", pbPackage)) - out.contents.WriteString(fmt.Sprintf("import krm %q\n\n", krmPackage)) + out.contents.WriteString("import (\n") + out.contents.WriteString(fmt.Sprintf("\tpb %q\n", pbPackage)) + out.contents.WriteString(fmt.Sprintf("\tkrm %q\n", krmPackage)) + out.contents.WriteString(fmt.Sprintf("\t%q\n", "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct")) + out.contents.WriteString(")\n") } v.writeMapFunctionsForPair(&out.contents, &pair) @@ -197,7 +200,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair } { - fmt.Fprintf(out, "func %s_FromProto(mapCtx *MapContext, in *pb.%s) *krm.%s {\n", goTypeName, pbTypeName, goTypeName) + fmt.Fprintf(out, "func %s_FromProto(mapCtx *direct.MapContext, in *pb.%s) *krm.%s {\n", goTypeName, pbTypeName, goTypeName) fmt.Fprintf(out, "\tif in == nil {\n") fmt.Fprintf(out, "\t\treturn nil\n") fmt.Fprintf(out, "\t}\n") @@ -207,7 +210,8 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair protoFieldName := strings.Title(protoField.JSONName()) protoAccessor := "Get" + protoFieldName + "()" - krmFieldName := strings.Title(protoField.JSONName()) + krmJSON := getJSONForKRM(protoField) + krmFieldName := strings.Title(krmJSON) krmField := goFields[krmFieldName] if krmField == nil { fmt.Fprintf(out, "\t// MISSING: %s\n", krmFieldName) @@ -270,7 +274,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair } if useSliceFromProtoFunction != "" { - fmt.Fprintf(out, "\tout.%s = Slice_FromProto(mapCtx, in.%s, %s)\n", + fmt.Fprintf(out, "\tout.%s = direct.Slice_FromProto(mapCtx, in.%s, %s)\n", krmFieldName, krmFieldName, useSliceFromProtoFunction, @@ -308,7 +312,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair protoAccessor, ) case protoreflect.EnumKind: - functionName := "Enum_FromProto" + functionName := "direct.Enum_FromProto" fmt.Fprintf(out, "\tout.%s = %s(mapCtx, in.%s)\n", krmFieldName, functionName, @@ -324,10 +328,17 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair protoreflect.Uint64Kind, protoreflect.Fixed64Kind, protoreflect.BytesKind: - fmt.Fprintf(out, "\tout.%s = LazyPtr(in.%s)\n", - krmFieldName, - protoAccessor, - ) + if protoIsPointerInGo(protoField) { + fmt.Fprintf(out, "\tout.%s = in.%s\n", + krmFieldName, + protoFieldName, + ) + } else { + fmt.Fprintf(out, "\tout.%s = direct.LazyPtr(in.%s)\n", + krmFieldName, + protoAccessor, + ) + } default: klog.Fatalf("unhandled kind %q for field %v", protoField.Kind(), protoField) } @@ -337,14 +348,16 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair 
*typePair } { - fmt.Fprintf(out, "func %s_ToProto(mapCtx *MapContext, in *krm.%s) *pb.%s {\n", goTypeName, goTypeName, pbTypeName) + fmt.Fprintf(out, "func %s_ToProto(mapCtx *direct.MapContext, in *krm.%s) *pb.%s {\n", goTypeName, goTypeName, pbTypeName) fmt.Fprintf(out, "\tif in == nil {\n") fmt.Fprintf(out, "\t\treturn nil\n") fmt.Fprintf(out, "\t}\n") fmt.Fprintf(out, "\tout := &pb.%s{}\n", pbTypeName) for i := 0; i < msg.Fields().Len(); i++ { protoField := msg.Fields().Get(i) - krmFieldName := strings.Title(protoField.JSONName()) + jsonName := getJSONForKRM(protoField) + + krmFieldName := strings.Title(jsonName) krmField := goFields[krmFieldName] if krmField == nil { fmt.Fprintf(out, "\t// MISSING: %s\n", krmFieldName) @@ -378,7 +391,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair krmElemTypeName = strings.TrimPrefix(krmElemTypeName, "[]") protoTypeName := "pb." + protoNameForEnum(protoField.Enum()) - functionName := "Enum_ToProto" + functionName := "direct.Enum_ToProto" useSliceToProtoFunction = fmt.Sprintf("%s[%s](mapCtx, in.%s)", functionName, protoTypeName, @@ -413,7 +426,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair } if useSliceToProtoFunction != "" { - fmt.Fprintf(out, "\tout.%s = Slice_ToProto(mapCtx, in.%s, %s)\n", + fmt.Fprintf(out, "\tout.%s = direct.Slice_ToProto(mapCtx, in.%s, %s)\n", protoFieldName, krmFieldName, useSliceToProtoFunction, @@ -471,7 +484,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair ) case protoreflect.EnumKind: protoTypeName := "pb." + protoNameForEnum(protoField.Enum()) - functionName := "Enum_ToProto" + functionName := "direct.Enum_ToProto" fmt.Fprintf(out, "\tout.%s = %s[%s](mapCtx, in.%s)\n", protoFieldName, functionName, @@ -499,7 +512,12 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair } oneof := protoField.ContainingOneof() - if oneof != nil { + if protoField.HasOptionalKeyword() { + fmt.Fprintf(out, "\tout.%s = in.%s\n", + protoFieldName, + krmFieldName, + ) + } else if oneof != nil { functionName := fmt.Sprintf("%s_%s_ToProto", goTypeName, protoFieldName) fmt.Fprintf(out, "\tif oneof := %s(mapCtx, in.%s); oneof != nil {\n", functionName, @@ -519,7 +537,7 @@ func (v *MapperGenerator) writeMapFunctionsForPair(out io.Writer, pair *typePair krmFieldName, ) } else { - fmt.Fprintf(out, "\tout.%s = ValueOf(in.%s)\n", + fmt.Fprintf(out, "\tout.%s = direct.ValueOf(in.%s)\n", protoFieldName, krmFieldName, ) @@ -574,3 +592,31 @@ func ToGoFieldName(name protoreflect.Name) string { } return strings.Join(tokens, "") } + +// protoIsPointerInGo returns if the field is going to be represented as a pointer in go. +// Most proto3 fields are not pointers, but a few are. 
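// A minimal hand-written sketch (not generated output, and not part of this diff)
// of the two field shapes that protoIsPointerInGo distinguishes. ExamplePB,
// ExampleKRM and the local lazyPtr helper below are illustrative stand-ins, not
// the real protoc-generated structs or the direct.LazyPtr helper.
package sketch

// ExamplePB mimics what protoc-gen-go emits for a message such as:
//
//	message Example {
//	  string name = 1;               // plain proto3 field: no presence tracking
//	  optional string node_type = 2; // explicit presence: generated as a pointer
//	}
type ExamplePB struct {
	Name     string
	NodeType *string
}

// ExampleKRM mimics the corresponding KRM type, where scalar fields are pointers.
type ExampleKRM struct {
	Name     *string
	NodeType *string
}

// lazyPtr mirrors the helper used for non-pointer proto fields: it returns nil
// for the zero value so an unset proto3 field stays unset on the KRM side.
func lazyPtr[T comparable](v T) *T {
	var zero T
	if v == zero {
		return nil
	}
	return &v
}

// exampleFromProto shows the two code paths the generator now chooses between.
func exampleFromProto(in *ExamplePB) *ExampleKRM {
	if in == nil {
		return nil
	}
	out := &ExampleKRM{}
	out.Name = lazyPtr(in.Name) // non-pointer proto field: wrap via the LazyPtr-style helper
	out.NodeType = in.NodeType  // pointer ("optional") proto field: copy the pointer as-is
	return out
}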
+func protoIsPointerInGo(field protoreflect.FieldDescriptor) bool { + switch field.Kind() { + case protoreflect.EnumKind: + if field.HasOptionalKeyword() { + return true + } + return false + + case protoreflect.StringKind, + protoreflect.FloatKind, + protoreflect.DoubleKind, + protoreflect.BoolKind, + protoreflect.Int64Kind, + protoreflect.Int32Kind, + protoreflect.Uint32Kind, + protoreflect.Uint64Kind, + protoreflect.Fixed64Kind, + protoreflect.BytesKind: + return field.HasOptionalKeyword() + + default: + klog.Fatalf("protoIsPointerInGo not implemented for %v", field) + } + return false +} diff --git a/dev/tools/controllerbuilder/pkg/codegen/typegenerator.go b/dev/tools/controllerbuilder/pkg/codegen/typegenerator.go index 99a16c5325..6f2c48e8f3 100644 --- a/dev/tools/controllerbuilder/pkg/codegen/typegenerator.go +++ b/dev/tools/controllerbuilder/pkg/codegen/typegenerator.go @@ -137,14 +137,15 @@ func (v *TypeGenerator) writeTypes(out io.Writer, msg protoreflect.MessageDescri goType := goNameForProtoMessage(msg, msg) { + fmt.Fprintf(out, "\n") fmt.Fprintf(out, "// +kcc:proto=%s\n", msg.FullName()) fmt.Fprintf(out, "type %s struct {\n", goType) for i := 0; i < msg.Fields().Len(); i++ { field := msg.Fields().Get(i) sourceLocations := msg.ParentFile().SourceLocations().ByDescriptor(field) - goFieldName := strings.Title(field.JSONName()) - jsonName := field.JSONName() + jsonName := getJSONForKRM(field) + goFieldName := strings.Title(jsonName) goType := "" if field.IsMap() { @@ -185,11 +186,11 @@ func (v *TypeGenerator) writeTypes(out io.Writer, msg protoreflect.MessageDescri if sourceLocations.LeadingComments != "" { comment := strings.TrimSpace(sourceLocations.LeadingComments) for _, line := range strings.Split(comment, "\n") { - fmt.Fprintf(out, " // %s\n", line) + fmt.Fprintf(out, "\t// %s\n", line) } } - fmt.Fprintf(out, " %s %s `json:\"%s,omitempty\"`\n", + fmt.Fprintf(out, "\t%s %s `json:\"%s,omitempty\"`\n", goFieldName, goType, jsonName, @@ -277,3 +278,13 @@ func goTypeForProtoKind(kind protoreflect.Kind) string { return goType } + +// getJSONForKRM returns the KRM JSON name for the field, +// honoring KRM conventions +func getJSONForKRM(protoField protoreflect.FieldDescriptor) string { + jsonName := protoField.JSONName() + if strings.HasSuffix(jsonName, "Id") { + jsonName = strings.TrimSuffix(jsonName, "Id") + "ID" + } + return jsonName +} diff --git a/dev/tools/controllerbuilder/pkg/commands/generatemapper/generatemappercommand.go b/dev/tools/controllerbuilder/pkg/commands/generatemapper/generatemappercommand.go index 7611340a68..8b5c85a15f 100644 --- a/dev/tools/controllerbuilder/pkg/commands/generatemapper/generatemappercommand.go +++ b/dev/tools/controllerbuilder/pkg/commands/generatemapper/generatemappercommand.go @@ -93,7 +93,10 @@ func RunGenerateMapper(ctx context.Context, o *GenerateMapperOptions) error { if strings.HasSuffix(fullName, "Response") { return "", false } - if !strings.HasPrefix(fullName, o.ServiceName) { + if strings.HasSuffix(fullName, "OperationMetadata") { + return "", false + } + if !strings.HasPrefix(fullName, o.ServiceName+".") { return "", false } diff --git a/dev/tools/controllerbuilder/pkg/commands/generatetypes/generatetypescommand.go b/dev/tools/controllerbuilder/pkg/commands/generatetypes/generatetypescommand.go index 5743e69256..efa2b1f963 100644 --- a/dev/tools/controllerbuilder/pkg/commands/generatetypes/generatetypescommand.go +++ b/dev/tools/controllerbuilder/pkg/commands/generatetypes/generatetypescommand.go @@ -99,8 +99,8 @@ func 
RunGenerateCRD(ctx context.Context, o *GenerateCRDOptions) error { return fmt.Errorf("loading proto: %w", err) } - goPackage := "" - protoPackagePath := "" + goPackage := strings.TrimSuffix(gv.Group, ".cnrm.cloud.google.com") + "/" + gv.Version + pathForMessage := func(msg protoreflect.MessageDescriptor) (string, bool) { fullName := string(msg.FullName()) if strings.HasSuffix(fullName, "Request") { @@ -109,18 +109,12 @@ func RunGenerateCRD(ctx context.Context, o *GenerateCRDOptions) error { if strings.HasSuffix(fullName, "Response") { return "", false } - if !strings.HasPrefix(fullName, o.ServiceName) { + if strings.HasSuffix(fullName, "OperationMetadata") { + return "", false + } + if !strings.HasPrefix(fullName, o.ServiceName+".") { return "", false } - - protoPackagePath = string(msg.ParentFile().Package()) - protoPackagePath = strings.TrimPrefix(protoPackagePath, "google.") - protoPackagePath = strings.TrimPrefix(protoPackagePath, "cloud.") - protoPackagePath = strings.TrimSuffix(protoPackagePath, ".v1") - protoPackagePath = strings.TrimSuffix(protoPackagePath, ".v1beta1") - protoPackagePath = strings.Join(strings.Split(protoPackagePath, "."), "/") - goPackage = "apis/" + protoPackagePath + "/" + gv.Version - return goPackage, true } typeGenerator := codegen.NewTypeGenerator(pathForMessage) @@ -134,10 +128,13 @@ func RunGenerateCRD(ctx context.Context, o *GenerateCRDOptions) error { } if o.KindNames != nil { + if gv.Group == "" { + return fmt.Errorf("--group must be specified with --kinds") + } scaffolder := &scaffold.APIScaffolder{ BaseDir: o.OutputAPIDirectory, GoPackage: goPackage, - Service: protoPackagePath, + Group: gv.Group, Version: gv.Version, PackageProtoTag: o.ServiceName, } diff --git a/dev/tools/controllerbuilder/scaffold/apis.go b/dev/tools/controllerbuilder/scaffold/apis.go index baa07fb64f..0d60b8b1ee 100644 --- a/dev/tools/controllerbuilder/scaffold/apis.go +++ b/dev/tools/controllerbuilder/scaffold/apis.go @@ -22,8 +22,6 @@ import ( "path/filepath" "strings" "text/template" - "unicode" - "unicode/utf8" "github.com/GoogleCloudPlatform/k8s-config-connector/dev/tools/controllerbuilder/template/apis" "github.com/fatih/color" @@ -32,7 +30,7 @@ import ( type APIScaffolder struct { BaseDir string GoPackage string - Service string + Group string Version string PackageProtoTag string } @@ -53,14 +51,14 @@ func (a *APIScaffolder) GetTypeFile(kind string) string { func (a *APIScaffolder) AddTypeFile(kind string) error { gcpResource := kind - if !strings.HasPrefix(strings.ToLower(kind), a.Service) { - s, size := utf8.DecodeRuneInString(a.Service) - uc := unicode.ToUpper(s) - kind = string(uc) + a.Service[size:] + kind - } + // if !strings.HasPrefix(strings.ToLower(kind), a.Service) { + // s, size := utf8.DecodeRuneInString(a.Service) + // uc := unicode.ToUpper(s) + // kind = string(uc) + a.Service[size:] + kind + // } typeFilePath := a.GetTypeFile(kind) cArgs := &apis.APIArgs{ - Service: a.Service, + Group: a.Group, Version: a.Version, Kind: kind, PackageProtoTag: a.PackageProtoTag, @@ -103,7 +101,7 @@ func (a *APIScaffolder) GroupVersionFileNotExist() bool { func (a *APIScaffolder) AddGroupVersionFile() error { docFilePath := filepath.Join(a.BaseDir, a.GoPackage, "groupversion_info.go") cArgs := &apis.APIArgs{ - Service: a.Service, + Group: a.Group, Version: a.Version, PackageProtoTag: a.PackageProtoTag, } @@ -122,7 +120,7 @@ func (a *APIScaffolder) DocFileNotExist() bool { func (a *APIScaffolder) AddDocFile() error { docFilePath := filepath.Join(a.BaseDir, a.GoPackage, 
"doc.go") cArgs := &apis.APIArgs{ - Service: a.Service, + Group: a.Group, Version: a.Version, PackageProtoTag: a.PackageProtoTag, } @@ -130,7 +128,7 @@ func (a *APIScaffolder) AddDocFile() error { } func scaffoldDocFile(path string, cArgs *apis.APIArgs) error { - tmpl, err := template.New(cArgs.Service).Parse(apis.DocTemplate) + tmpl, err := template.New("doc.go").Parse(apis.DocTemplate) if err != nil { return fmt.Errorf("parse doc.go template: %w", err) } @@ -146,7 +144,7 @@ func scaffoldDocFile(path string, cArgs *apis.APIArgs) error { } func scaffoldGropuVersionFile(path string, cArgs *apis.APIArgs) error { - tmpl, err := template.New(cArgs.Service).Parse(apis.GroupVersionInfoTemplate) + tmpl, err := template.New("groupversioninfo.go").Parse(apis.GroupVersionInfoTemplate) if err != nil { return fmt.Errorf("parse groupversion_info.go template: %w", err) } diff --git a/dev/tools/controllerbuilder/template/apis/doc.go b/dev/tools/controllerbuilder/template/apis/doc.go index d80522dfad..da65573126 100644 --- a/dev/tools/controllerbuilder/template/apis/doc.go +++ b/dev/tools/controllerbuilder/template/apis/doc.go @@ -1,7 +1,6 @@ package apis -const DocTemplate = ` -// Copyright 2024 Google LLC +const DocTemplate = `// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,8 +14,6 @@ const DocTemplate = ` // See the License for the specific language governing permissions and // limitations under the License. -{{- if .PackageProtoTag }} -// +kcc:proto={{ .PackageProtoTag }} -{{- end }} +{{ if .PackageProtoTag }}// +kcc:proto={{ .PackageProtoTag }}{{ end }} package {{ .Version }} ` diff --git a/dev/tools/controllerbuilder/template/apis/groupversion_info.go b/dev/tools/controllerbuilder/template/apis/groupversion_info.go index 7673078e88..ee75befd2c 100644 --- a/dev/tools/controllerbuilder/template/apis/groupversion_info.go +++ b/dev/tools/controllerbuilder/template/apis/groupversion_info.go @@ -1,7 +1,6 @@ package apis -const GroupVersionInfoTemplate = ` -// Copyright 2024 Google LLC +const GroupVersionInfoTemplate = `// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +15,7 @@ const GroupVersionInfoTemplate = ` // limitations under the License. 
// +kubebuilder:object:generate=true -// +groupName={{.Service}}.cnrm.cloud.google.com +// +groupName={{.Group}} package {{ .Version }} import ( @@ -26,7 +25,7 @@ import ( var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "{{.Service}}.cnrm.cloud.google.com", Version: "{{.Version}}"} + GroupVersion = schema.GroupVersion{Group: "{{ .Group }}", Version: "{{.Version}}"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} diff --git a/dev/tools/controllerbuilder/template/apis/types.go b/dev/tools/controllerbuilder/template/apis/types.go index 3efedc053c..9a60a36080 100644 --- a/dev/tools/controllerbuilder/template/apis/types.go +++ b/dev/tools/controllerbuilder/template/apis/types.go @@ -1,7 +1,7 @@ package apis type APIArgs struct { - Service string + Group string Version string Kind string GcpResource string diff --git a/dev/tools/proto-to-mapper/Makefile b/dev/tools/proto-to-mapper/Makefile index 9ecd85cb48..a6d8e75306 100644 --- a/dev/tools/proto-to-mapper/Makefile +++ b/dev/tools/proto-to-mapper/Makefile @@ -12,6 +12,7 @@ generate-pb: install-protoc-linux ./third_party/googleapis/google/api/*.proto \ ./third_party/googleapis/google/api/*/*/*.proto \ ./third_party/googleapis/google/cloud/*/*/*.proto \ + ./third_party/googleapis/google/cloud/*/*/*/*.proto \ ./third_party/googleapis/google/iam/v1/*.proto \ ./third_party/googleapis/google/logging/v2/*.proto \ ./third_party/googleapis/google/monitoring/v3/*.proto \ diff --git a/dev/tools/proto-to-mapper/main.go b/dev/tools/proto-to-mapper/main.go index 30991c9a24..8246ff2d0a 100644 --- a/dev/tools/proto-to-mapper/main.go +++ b/dev/tools/proto-to-mapper/main.go @@ -755,8 +755,10 @@ func (v *visitor) writeMapFunctions() { v.generatedFiles[k] = out out.contents.WriteString(fmt.Sprintf("package %s\n\n", lastGoComponent(goPackage))) - out.contents.WriteString(fmt.Sprintf("import pb %q\n\n", "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb")) - out.contents.WriteString(fmt.Sprintf("import krm %q\n\n", "github.com/GoogleCloudPlatform/k8s-config-connector/apis/monitoring/v1beta1")) + out.contents.WriteString("import (\n") + out.contents.WriteString(fmt.Sprintf("\tkrm %q\n\n", "github.com/GoogleCloudPlatform/k8s-config-connector/apis/monitoring/v1beta1")) + out.contents.WriteString(fmt.Sprintf("\tpb %q\n\n", "cloud.google.com/go/monitoring/dashboard/apiv1/dashboardpb")) + out.contents.WriteString(")\n") } v.writeMapFunctionsForPair(&out.contents, &pair) diff --git a/go.mod b/go.mod index 92ab726e19..df2e53299e 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( cloud.google.com/go/iam v1.1.10 cloud.google.com/go/monitoring v1.20.1 cloud.google.com/go/profiler v0.4.1 + cloud.google.com/go/redis v1.16.2 cloud.google.com/go/resourcemanager v1.9.9 cloud.google.com/go/securesourcemanager v1.0.1 cloud.google.com/go/security v1.17.2 diff --git a/go.sum b/go.sum index 4b7e6e0427..c246f45a74 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/redis v1.16.2 h1:QbarPMu22tuUOqi3ynNKk2mQWl7xitMTxAaAUaBUFsE= +cloud.google.com/go/redis 
v1.16.2/go.mod h1:bn/4nXSZkoH4QTXRjqWR2AZ0WA1b13ct354nul2SSiU= cloud.google.com/go/resourcemanager v1.9.9 h1:9JgRo4uBdCLJpWb6c+1+q7QPyWzH0LSCKUcF/IliKNk= cloud.google.com/go/resourcemanager v1.9.9/go.mod h1:vCBRKurJv+XVvRZ0XFhI/eBrBM7uBOPFjMEwSDMIflY= cloud.google.com/go/securesourcemanager v1.0.1 h1:lpnGYxqLJqw9UpTweNGQlRqwi0Ep/qdH/OTt3Tyb6OQ= diff --git a/pkg/clients/generated/apis/redis/v1alpha1/doc.go b/pkg/clients/generated/apis/redis/v1alpha1/doc.go new file mode 100644 index 0000000000..a6803c4284 --- /dev/null +++ b/pkg/clients/generated/apis/redis/v1alpha1/doc.go @@ -0,0 +1,41 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Config Connector and manual +// changes will be clobbered when the file is regenerated. +// +// ---------------------------------------------------------------------------- + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Package v1alpha1 contains API Schema definitions for the redis v1alpha1 API group. +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/redis +// +k8s:defaulter-gen=TypeMeta +// +groupName=redis.cnrm.cloud.google.com + +// Generate deepcopy object for redis/v1alpha1 API group +// +//go:generate go run ../../../../../../scripts/deepcopy-gen/main.go -O zz_generated.deepcopy -i . -h ../../../../../../hack/boilerplate_client_alpha.go.txt +package v1alpha1 diff --git a/pkg/clients/generated/apis/redis/v1alpha1/rediscluster_types.go b/pkg/clients/generated/apis/redis/v1alpha1/rediscluster_types.go new file mode 100644 index 0000000000..61805e4db3 --- /dev/null +++ b/pkg/clients/generated/apis/redis/v1alpha1/rediscluster_types.go @@ -0,0 +1,270 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Config Connector and manual +// changes will be clobbered when the file is regenerated. +// +// ---------------------------------------------------------------------------- + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +package v1alpha1 + +import ( + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ClusterAofConfig struct { + /* Optional. fsync configuration. */ + // +optional + AppendFsync *string `json:"appendFsync,omitempty"` +} + +type ClusterPersistenceConfig struct { + /* Optional. AOF configuration. This field will be ignored if mode is not AOF. */ + // +optional + AofConfig *ClusterAofConfig `json:"aofConfig,omitempty"` + + /* Optional. The mode of persistence. */ + // +optional + Mode *string `json:"mode,omitempty"` + + /* Optional. RDB configuration. This field will be ignored if mode is not RDB. */ + // +optional + RdbConfig *ClusterRdbConfig `json:"rdbConfig,omitempty"` +} + +type ClusterPscConfigs struct { + /* Required. The network where the IP address of the discovery endpoint will be reserved, in the form of projects/{network_project}/global/networks/{network_id}. */ + // +optional + NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"` +} + +type ClusterRdbConfig struct { + /* Optional. Period between RDB snapshots. */ + // +optional + RdbSnapshotPeriod *string `json:"rdbSnapshotPeriod,omitempty"` + + /* Optional. The time that the first snapshot was/will be attempted, and to which future snapshots will be aligned. If not provided, the current time will be used. */ + // +optional + RdbSnapshotStartTime *string `json:"rdbSnapshotStartTime,omitempty"` +} + +type ClusterZoneDistributionConfig struct { + /* Optional. The mode of zone distribution. Defaults to MULTI_ZONE, when not specified. */ + // +optional + Mode *string `json:"mode,omitempty"` + + /* Optional. When SINGLE ZONE distribution is selected, zone field would be used to allocate all resources in that zone. This is not applicable to MULTI_ZONE, and would be ignored for MULTI_ZONE clusters. */ + // +optional + Zone *string `json:"zone,omitempty"` +} + +type RedisClusterSpec struct { + /* Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. */ + // +optional + AuthorizationMode *string `json:"authorizationMode,omitempty"` + + /* Optional. The delete operation will fail when the value is set to true. */ + // +optional + DeletionProtectionEnabled *bool `json:"deletionProtectionEnabled,omitempty"` + + /* Optional. The type of a redis node in the cluster. NodeType determines the underlying machine-type of a redis node. */ + // +optional + NodeType *string `json:"nodeType,omitempty"` + + /* Optional. Persistence config (RDB, AOF) for the cluster. */ + // +optional + PersistenceConfig *ClusterPersistenceConfig `json:"persistenceConfig,omitempty"` + + /* Required. 
Each PscConfig configures the consumer network where IPs will be designated to the cluster for client access through Private Service Connect Automation. Currently, only one PscConfig is supported. */ + // +optional + PscConfigs []ClusterPscConfigs `json:"pscConfigs,omitempty"` + + /* Optional. Key/Value pairs of customer overrides for mutable Redis Configs */ + // +optional + RedisConfigs map[string]string `json:"redisConfigs,omitempty"` + + /* Optional. The number of replica nodes per shard. */ + // +optional + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + /* The RedisCluster name. If not given, the metadata.name will be used. */ + // +optional + ResourceID *string `json:"resourceID,omitempty"` + + /* Required. Number of shards for the Redis cluster. */ + // +optional + ShardCount *int32 `json:"shardCount,omitempty"` + + /* Optional. The in-transit encryption for the Redis cluster. If not provided, encryption is disabled for the cluster. */ + // +optional + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` + + /* Optional. This config will be used to determine how the customer wants us to distribute cluster resources within the region. */ + // +optional + ZoneDistributionConfig *ClusterZoneDistributionConfig `json:"zoneDistributionConfig,omitempty"` +} + +type ClusterDiscoveryEndpointsStatus struct { + /* Output only. Address of the exposed Redis endpoint used by clients to connect to the service. The address could be either IP or hostname. */ + // +optional + Address *string `json:"address,omitempty"` + + /* Output only. The port number of the exposed Redis endpoint. */ + // +optional + Port *int32 `json:"port,omitempty"` + + /* Output only. Customer configuration for where the endpoint is created and accessed from. */ + // +optional + PscConfig *ClusterPscConfigStatus `json:"pscConfig,omitempty"` +} + +type ClusterObservedStateStatus struct { + /* Output only. The timestamp associated with the cluster creation request. */ + // +optional + CreateTime *string `json:"createTime,omitempty"` + + /* Output only. Endpoints created on each given network, for Redis clients to connect to the cluster. Currently only one discovery endpoint is supported. */ + // +optional + DiscoveryEndpoints []ClusterDiscoveryEndpointsStatus `json:"discoveryEndpoints,omitempty"` + + /* Output only. Precise value of redis memory size in GB for the entire cluster. */ + // +optional + PreciseSizeGb *float64 `json:"preciseSizeGb,omitempty"` + + /* Output only. PSC connections for discovery of the cluster topology and accessing the cluster. */ + // +optional + PscConnections []ClusterPscConnectionsStatus `json:"pscConnections,omitempty"` + + /* Output only. Redis memory size in GB for the entire cluster rounded up to the next integer. */ + // +optional + SizeGb *int32 `json:"sizeGb,omitempty"` + + /* Output only. The current state of this cluster. Can be CREATING, READY, UPDATING, DELETING and SUSPENDED */ + // +optional + State *string `json:"state,omitempty"` + + /* Output only. Additional information about the current state of the cluster. */ + // +optional + StateInfo *ClusterStateInfoStatus `json:"stateInfo,omitempty"` + + /* Output only. System assigned, unique identifier for the cluster. */ + // +optional + Uid *string `json:"uid,omitempty"` +} + +type ClusterPscConfigStatus struct { + /* Required. The network where the IP address of the discovery endpoint will be reserved, in the form of projects/{network_project}/global/networks/{network_id}. 
*/ + // +optional + NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"` +} + +type ClusterPscConnectionsStatus struct { + /* Output only. The IP allocated on the consumer network for the PSC forwarding rule. */ + // +optional + Address *string `json:"address,omitempty"` + + /* Output only. The URI of the consumer side forwarding rule. Example: projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. */ + // +optional + ForwardingRule *string `json:"forwardingRule,omitempty"` + + /* The consumer network where the IP address resides, in the form of projects/{project_id}/global/networks/{network_id}. */ + // +optional + Network *string `json:"network,omitempty"` + + /* Output only. The consumer project_id where the forwarding rule is created from. */ + // +optional + ProjectID *string `json:"projectID,omitempty"` + + /* Output only. The PSC connection id of the forwarding rule connected to the service attachment. */ + // +optional + PscConnectionID *string `json:"pscConnectionID,omitempty"` +} + +type ClusterStateInfoStatus struct { + /* Describes ongoing update on the cluster when cluster state is UPDATING. */ + // +optional + UpdateInfo *ClusterUpdateInfoStatus `json:"updateInfo,omitempty"` +} + +type ClusterUpdateInfoStatus struct { + /* Target number of replica nodes per shard. */ + // +optional + TargetReplicaCount *int32 `json:"targetReplicaCount,omitempty"` + + /* Target number of shards for redis cluster */ + // +optional + TargetShardCount *int32 `json:"targetShardCount,omitempty"` +} + +type RedisClusterStatus struct { + /* Conditions represent the latest available observations of the + RedisCluster's current state. */ + Conditions []v1alpha1.Condition `json:"conditions,omitempty"` + /* A unique specifier for the RedisCluster resource in GCP. */ + // +optional + ExternalRef *string `json:"externalRef,omitempty"` + + /* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */ + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + /* ObservedState is the state of the resource as most recently observed in GCP. 
*/ + // +optional + ObservedState *ClusterObservedStateStatus `json:"observedState,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories=gcp,shortName= +// +kubebuilder:subresource:status +// +kubebuilder:metadata:labels="cnrm.cloud.google.com/managed-by-kcc=true";"cnrm.cloud.google.com/system=true" +// +kubebuilder:printcolumn:name="Age",JSONPath=".metadata.creationTimestamp",type="date" +// +kubebuilder:printcolumn:name="Ready",JSONPath=".status.conditions[?(@.type=='Ready')].status",type="string",description="When 'True', the most recent reconcile of the resource succeeded" +// +kubebuilder:printcolumn:name="Status",JSONPath=".status.conditions[?(@.type=='Ready')].reason",type="string",description="The reason for the value in 'Ready'" +// +kubebuilder:printcolumn:name="Status Age",JSONPath=".status.conditions[?(@.type=='Ready')].lastTransitionTime",type="date",description="The last transition time for the value in 'Status'" + +// RedisCluster is the Schema for the redis API +// +k8s:openapi-gen=true +type RedisCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RedisClusterSpec `json:"spec,omitempty"` + Status RedisClusterStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RedisClusterList contains a list of RedisCluster +type RedisClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RedisCluster{}, &RedisClusterList{}) +} diff --git a/pkg/clients/generated/apis/redis/v1alpha1/register.go b/pkg/clients/generated/apis/redis/v1alpha1/register.go new file mode 100644 index 0000000000..c9333d9ff4 --- /dev/null +++ b/pkg/clients/generated/apis/redis/v1alpha1/register.go @@ -0,0 +1,63 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Config Connector and manual +// changes will be clobbered when the file is regenerated. +// +// ---------------------------------------------------------------------------- + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Package v1alpha1 contains API Schema definitions for the redis v1alpha1 API group. 
+// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/redis +// +k8s:defaulter-gen=TypeMeta +// +groupName=redis.cnrm.cloud.google.com +package v1alpha1 + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is the group version used to register these objects. + SchemeGroupVersion = schema.GroupVersion{Group: "redis.cnrm.cloud.google.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme + + RedisClusterGVK = schema.GroupVersionKind{ + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, + Kind: reflect.TypeOf(RedisCluster{}).Name(), + } + + redisAPIVersion = SchemeGroupVersion.String() +) diff --git a/pkg/clients/generated/apis/redis/v1alpha1/zz_generated.deepcopy.go b/pkg/clients/generated/apis/redis/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ea1668fcb2 --- /dev/null +++ b/pkg/clients/generated/apis/redis/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,527 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + k8sv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAofConfig) DeepCopyInto(out *ClusterAofConfig) { + *out = *in + if in.AppendFsync != nil { + in, out := &in.AppendFsync, &out.AppendFsync + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAofConfig. +func (in *ClusterAofConfig) DeepCopy() *ClusterAofConfig { + if in == nil { + return nil + } + out := new(ClusterAofConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDiscoveryEndpointsStatus) DeepCopyInto(out *ClusterDiscoveryEndpointsStatus) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + if in.PscConfig != nil { + in, out := &in.PscConfig, &out.PscConfig + *out = new(ClusterPscConfigStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDiscoveryEndpointsStatus. +func (in *ClusterDiscoveryEndpointsStatus) DeepCopy() *ClusterDiscoveryEndpointsStatus { + if in == nil { + return nil + } + out := new(ClusterDiscoveryEndpointsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObservedStateStatus) DeepCopyInto(out *ClusterObservedStateStatus) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.DiscoveryEndpoints != nil { + in, out := &in.DiscoveryEndpoints, &out.DiscoveryEndpoints + *out = make([]ClusterDiscoveryEndpointsStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreciseSizeGb != nil { + in, out := &in.PreciseSizeGb, &out.PreciseSizeGb + *out = new(float64) + **out = **in + } + if in.PscConnections != nil { + in, out := &in.PscConnections, &out.PscConnections + *out = make([]ClusterPscConnectionsStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SizeGb != nil { + in, out := &in.SizeGb, &out.SizeGb + *out = new(int32) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StateInfo != nil { + in, out := &in.StateInfo, &out.StateInfo + *out = new(ClusterStateInfoStatus) + (*in).DeepCopyInto(*out) + } + if in.Uid != nil { + in, out := &in.Uid, &out.Uid + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservedStateStatus. +func (in *ClusterObservedStateStatus) DeepCopy() *ClusterObservedStateStatus { + if in == nil { + return nil + } + out := new(ClusterObservedStateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPersistenceConfig) DeepCopyInto(out *ClusterPersistenceConfig) { + *out = *in + if in.AofConfig != nil { + in, out := &in.AofConfig, &out.AofConfig + *out = new(ClusterAofConfig) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.RdbConfig != nil { + in, out := &in.RdbConfig, &out.RdbConfig + *out = new(ClusterRdbConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPersistenceConfig. +func (in *ClusterPersistenceConfig) DeepCopy() *ClusterPersistenceConfig { + if in == nil { + return nil + } + out := new(ClusterPersistenceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterPscConfigStatus) DeepCopyInto(out *ClusterPscConfigStatus) { + *out = *in + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(k8sv1alpha1.ResourceRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPscConfigStatus. +func (in *ClusterPscConfigStatus) DeepCopy() *ClusterPscConfigStatus { + if in == nil { + return nil + } + out := new(ClusterPscConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPscConfigs) DeepCopyInto(out *ClusterPscConfigs) { + *out = *in + if in.NetworkRef != nil { + in, out := &in.NetworkRef, &out.NetworkRef + *out = new(k8sv1alpha1.ResourceRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPscConfigs. +func (in *ClusterPscConfigs) DeepCopy() *ClusterPscConfigs { + if in == nil { + return nil + } + out := new(ClusterPscConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPscConnectionsStatus) DeepCopyInto(out *ClusterPscConnectionsStatus) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } + if in.ForwardingRule != nil { + in, out := &in.ForwardingRule, &out.ForwardingRule + *out = new(string) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.ProjectID != nil { + in, out := &in.ProjectID, &out.ProjectID + *out = new(string) + **out = **in + } + if in.PscConnectionID != nil { + in, out := &in.PscConnectionID, &out.PscConnectionID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPscConnectionsStatus. +func (in *ClusterPscConnectionsStatus) DeepCopy() *ClusterPscConnectionsStatus { + if in == nil { + return nil + } + out := new(ClusterPscConnectionsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRdbConfig) DeepCopyInto(out *ClusterRdbConfig) { + *out = *in + if in.RdbSnapshotPeriod != nil { + in, out := &in.RdbSnapshotPeriod, &out.RdbSnapshotPeriod + *out = new(string) + **out = **in + } + if in.RdbSnapshotStartTime != nil { + in, out := &in.RdbSnapshotStartTime, &out.RdbSnapshotStartTime + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRdbConfig. +func (in *ClusterRdbConfig) DeepCopy() *ClusterRdbConfig { + if in == nil { + return nil + } + out := new(ClusterRdbConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStateInfoStatus) DeepCopyInto(out *ClusterStateInfoStatus) { + *out = *in + if in.UpdateInfo != nil { + in, out := &in.UpdateInfo, &out.UpdateInfo + *out = new(ClusterUpdateInfoStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStateInfoStatus. 
+func (in *ClusterStateInfoStatus) DeepCopy() *ClusterStateInfoStatus { + if in == nil { + return nil + } + out := new(ClusterStateInfoStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUpdateInfoStatus) DeepCopyInto(out *ClusterUpdateInfoStatus) { + *out = *in + if in.TargetReplicaCount != nil { + in, out := &in.TargetReplicaCount, &out.TargetReplicaCount + *out = new(int32) + **out = **in + } + if in.TargetShardCount != nil { + in, out := &in.TargetShardCount, &out.TargetShardCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpdateInfoStatus. +func (in *ClusterUpdateInfoStatus) DeepCopy() *ClusterUpdateInfoStatus { + if in == nil { + return nil + } + out := new(ClusterUpdateInfoStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterZoneDistributionConfig) DeepCopyInto(out *ClusterZoneDistributionConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(string) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterZoneDistributionConfig. +func (in *ClusterZoneDistributionConfig) DeepCopy() *ClusterZoneDistributionConfig { + if in == nil { + return nil + } + out := new(ClusterZoneDistributionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCluster) DeepCopyInto(out *RedisCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCluster. +func (in *RedisCluster) DeepCopy() *RedisCluster { + if in == nil { + return nil + } + out := new(RedisCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterList) DeepCopyInto(out *RedisClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterList. +func (in *RedisClusterList) DeepCopy() *RedisClusterList { + if in == nil { + return nil + } + out := new(RedisClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RedisClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterSpec) DeepCopyInto(out *RedisClusterSpec) { + *out = *in + if in.AuthorizationMode != nil { + in, out := &in.AuthorizationMode, &out.AuthorizationMode + *out = new(string) + **out = **in + } + if in.DeletionProtectionEnabled != nil { + in, out := &in.DeletionProtectionEnabled, &out.DeletionProtectionEnabled + *out = new(bool) + **out = **in + } + if in.NodeType != nil { + in, out := &in.NodeType, &out.NodeType + *out = new(string) + **out = **in + } + if in.PersistenceConfig != nil { + in, out := &in.PersistenceConfig, &out.PersistenceConfig + *out = new(ClusterPersistenceConfig) + (*in).DeepCopyInto(*out) + } + if in.PscConfigs != nil { + in, out := &in.PscConfigs, &out.PscConfigs + *out = make([]ClusterPscConfigs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RedisConfigs != nil { + in, out := &in.RedisConfigs, &out.RedisConfigs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + if in.ResourceID != nil { + in, out := &in.ResourceID, &out.ResourceID + *out = new(string) + **out = **in + } + if in.ShardCount != nil { + in, out := &in.ShardCount, &out.ShardCount + *out = new(int32) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } + if in.ZoneDistributionConfig != nil { + in, out := &in.ZoneDistributionConfig, &out.ZoneDistributionConfig + *out = new(ClusterZoneDistributionConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterSpec. +func (in *RedisClusterSpec) DeepCopy() *RedisClusterSpec { + if in == nil { + return nil + } + out := new(RedisClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisClusterStatus) DeepCopyInto(out *RedisClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]k8sv1alpha1.Condition, len(*in)) + copy(*out, *in) + } + if in.ExternalRef != nil { + in, out := &in.ExternalRef, &out.ExternalRef + *out = new(string) + **out = **in + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ObservedState != nil { + in, out := &in.ObservedState, &out.ObservedState + *out = new(ClusterObservedStateStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisClusterStatus. 
+func (in *RedisClusterStatus) DeepCopy() *RedisClusterStatus { + if in == nil { + return nil + } + out := new(RedisClusterStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/clients/generated/client/clientset/versioned/clientset.go b/pkg/clients/generated/client/clientset/versioned/clientset.go index 030965862b..da4a91a955 100644 --- a/pkg/clients/generated/client/clientset/versioned/clientset.go +++ b/pkg/clients/generated/client/clientset/versioned/clientset.go @@ -117,6 +117,7 @@ import ( pubsublitev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/pubsublite/v1alpha1" pubsublitev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/pubsublite/v1beta1" recaptchaenterprisev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/recaptchaenterprise/v1beta1" + redisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1" redisv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1beta1" resourcemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/resourcemanager/v1beta1" runv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/run/v1beta1" @@ -241,6 +242,7 @@ type Interface interface { PubsubliteV1alpha1() pubsublitev1alpha1.PubsubliteV1alpha1Interface PubsubliteV1beta1() pubsublitev1beta1.PubsubliteV1beta1Interface RecaptchaenterpriseV1beta1() recaptchaenterprisev1beta1.RecaptchaenterpriseV1beta1Interface + RedisV1alpha1() redisv1alpha1.RedisV1alpha1Interface RedisV1beta1() redisv1beta1.RedisV1beta1Interface ResourcemanagerV1beta1() resourcemanagerv1beta1.ResourcemanagerV1beta1Interface RunV1beta1() runv1beta1.RunV1beta1Interface @@ -363,6 +365,7 @@ type Clientset struct { pubsubliteV1alpha1 *pubsublitev1alpha1.PubsubliteV1alpha1Client pubsubliteV1beta1 *pubsublitev1beta1.PubsubliteV1beta1Client recaptchaenterpriseV1beta1 *recaptchaenterprisev1beta1.RecaptchaenterpriseV1beta1Client + redisV1alpha1 *redisv1alpha1.RedisV1alpha1Client redisV1beta1 *redisv1beta1.RedisV1beta1Client resourcemanagerV1beta1 *resourcemanagerv1beta1.ResourcemanagerV1beta1Client runV1beta1 *runv1beta1.RunV1beta1Client @@ -850,6 +853,11 @@ func (c *Clientset) RecaptchaenterpriseV1beta1() recaptchaenterprisev1beta1.Reca return c.recaptchaenterpriseV1beta1 } +// RedisV1alpha1 retrieves the RedisV1alpha1Client +func (c *Clientset) RedisV1alpha1() redisv1alpha1.RedisV1alpha1Interface { + return c.redisV1alpha1 +} + // RedisV1beta1 retrieves the RedisV1beta1Client func (c *Clientset) RedisV1beta1() redisv1beta1.RedisV1beta1Interface { return c.redisV1beta1 @@ -1387,6 +1395,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.redisV1alpha1, err = redisv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.redisV1beta1, err = redisv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -1600,6 +1612,7 @@ func New(c rest.Interface) *Clientset { cs.pubsubliteV1alpha1 = pubsublitev1alpha1.New(c) cs.pubsubliteV1beta1 = pubsublitev1beta1.New(c) cs.recaptchaenterpriseV1beta1 = 
recaptchaenterprisev1beta1.New(c) + cs.redisV1alpha1 = redisv1alpha1.New(c) cs.redisV1beta1 = redisv1beta1.New(c) cs.resourcemanagerV1beta1 = resourcemanagerv1beta1.New(c) cs.runV1beta1 = runv1beta1.New(c) diff --git a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go index 4d656cb351..32d5f1d394 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/clients/generated/client/clientset/versioned/fake/clientset_generated.go @@ -207,6 +207,8 @@ import ( fakepubsublitev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/pubsublite/v1beta1/fake" recaptchaenterprisev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/recaptchaenterprise/v1beta1" fakerecaptchaenterprisev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/recaptchaenterprise/v1beta1/fake" + redisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1" + fakeredisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake" redisv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1beta1" fakeredisv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1beta1/fake" resourcemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/resourcemanager/v1beta1" @@ -774,6 +776,11 @@ func (c *Clientset) RecaptchaenterpriseV1beta1() recaptchaenterprisev1beta1.Reca return &fakerecaptchaenterprisev1beta1.FakeRecaptchaenterpriseV1beta1{Fake: &c.Fake} } +// RedisV1alpha1 retrieves the RedisV1alpha1Client +func (c *Clientset) RedisV1alpha1() redisv1alpha1.RedisV1alpha1Interface { + return &fakeredisv1alpha1.FakeRedisV1alpha1{Fake: &c.Fake} +} + // RedisV1beta1 retrieves the RedisV1beta1Client func (c *Clientset) RedisV1beta1() redisv1beta1.RedisV1beta1Interface { return &fakeredisv1beta1.FakeRedisV1beta1{Fake: &c.Fake} diff --git a/pkg/clients/generated/client/clientset/versioned/fake/register.go b/pkg/clients/generated/client/clientset/versioned/fake/register.go index 734e4f1b41..ce16f3c476 100644 --- a/pkg/clients/generated/client/clientset/versioned/fake/register.go +++ b/pkg/clients/generated/client/clientset/versioned/fake/register.go @@ -114,6 +114,7 @@ import ( pubsublitev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/pubsublite/v1alpha1" pubsublitev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/pubsublite/v1beta1" recaptchaenterprisev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/recaptchaenterprise/v1beta1" + redisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1alpha1" redisv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1beta1" resourcemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/resourcemanager/v1beta1" runv1beta1 
"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/run/v1beta1" @@ -242,6 +243,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ pubsublitev1alpha1.AddToScheme, pubsublitev1beta1.AddToScheme, recaptchaenterprisev1beta1.AddToScheme, + redisv1alpha1.AddToScheme, redisv1beta1.AddToScheme, resourcemanagerv1beta1.AddToScheme, runv1beta1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/scheme/register.go b/pkg/clients/generated/client/clientset/versioned/scheme/register.go index 330900f56f..d693432ed1 100644 --- a/pkg/clients/generated/client/clientset/versioned/scheme/register.go +++ b/pkg/clients/generated/client/clientset/versioned/scheme/register.go @@ -114,6 +114,7 @@ import ( pubsublitev1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/pubsublite/v1alpha1" pubsublitev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/pubsublite/v1beta1" recaptchaenterprisev1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/recaptchaenterprise/v1beta1" + redisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1alpha1" redisv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1beta1" resourcemanagerv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/resourcemanager/v1beta1" runv1beta1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/run/v1beta1" @@ -242,6 +243,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ pubsublitev1alpha1.AddToScheme, pubsublitev1beta1.AddToScheme, recaptchaenterprisev1beta1.AddToScheme, + redisv1alpha1.AddToScheme, redisv1beta1.AddToScheme, resourcemanagerv1beta1.AddToScheme, runv1beta1.AddToScheme, diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/doc.go new file mode 100644 index 0000000000..61f2499ab1 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/doc.go @@ -0,0 +1,23 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/doc.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..7a39491606 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/doc.go @@ -0,0 +1,23 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_redis_client.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_redis_client.go new file mode 100644 index 0000000000..e1907953b0 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_redis_client.go @@ -0,0 +1,43 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeRedisV1alpha1 struct { + *testing.Fake +} + +func (c *FakeRedisV1alpha1) RedisClusters(namespace string) v1alpha1.RedisClusterInterface { + return &FakeRedisClusters{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeRedisV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_rediscluster.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_rediscluster.go new file mode 100644 index 0000000000..04cf9288af --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/fake/fake_rediscluster.go @@ -0,0 +1,144 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRedisClusters implements RedisClusterInterface +type FakeRedisClusters struct { + Fake *FakeRedisV1alpha1 + ns string +} + +var redisclustersResource = v1alpha1.SchemeGroupVersion.WithResource("redisclusters") + +var redisclustersKind = v1alpha1.SchemeGroupVersion.WithKind("RedisCluster") + +// Get takes name of the redisCluster, and returns the corresponding redisCluster object, and an error if there is any. +func (c *FakeRedisClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RedisCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(redisclustersResource, c.ns, name), &v1alpha1.RedisCluster{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RedisCluster), err +} + +// List takes label and field selectors, and returns the list of RedisClusters that match those selectors. +func (c *FakeRedisClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RedisClusterList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(redisclustersResource, redisclustersKind, c.ns, opts), &v1alpha1.RedisClusterList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RedisClusterList{ListMeta: obj.(*v1alpha1.RedisClusterList).ListMeta} + for _, item := range obj.(*v1alpha1.RedisClusterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested redisClusters. +func (c *FakeRedisClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(redisclustersResource, c.ns, opts)) + +} + +// Create takes the representation of a redisCluster and creates it. Returns the server's representation of the redisCluster, and an error, if there is any. +func (c *FakeRedisClusters) Create(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.CreateOptions) (result *v1alpha1.RedisCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(redisclustersResource, c.ns, redisCluster), &v1alpha1.RedisCluster{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RedisCluster), err +} + +// Update takes the representation of a redisCluster and updates it. Returns the server's representation of the redisCluster, and an error, if there is any. +func (c *FakeRedisClusters) Update(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (result *v1alpha1.RedisCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(redisclustersResource, c.ns, redisCluster), &v1alpha1.RedisCluster{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RedisCluster), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRedisClusters) UpdateStatus(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (*v1alpha1.RedisCluster, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(redisclustersResource, "status", c.ns, redisCluster), &v1alpha1.RedisCluster{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RedisCluster), err +} + +// Delete takes name of the redisCluster and deletes it. Returns an error if one occurs. +func (c *FakeRedisClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(redisclustersResource, c.ns, name, opts), &v1alpha1.RedisCluster{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRedisClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(redisclustersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.RedisClusterList{}) + return err +} + +// Patch applies the patch and returns the patched redisCluster. +func (c *FakeRedisClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisCluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(redisclustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.RedisCluster{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RedisCluster), err +} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/generated_expansion.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..6a3bd5e4a8 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/generated_expansion.go @@ -0,0 +1,24 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +type RedisClusterExpansion interface{} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/redis_client.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/redis_client.go new file mode 100644 index 0000000000..6a8e6db769 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/redis_client.go @@ -0,0 +1,110 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1alpha1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type RedisV1alpha1Interface interface { + RESTClient() rest.Interface + RedisClustersGetter +} + +// RedisV1alpha1Client is used to interact with features provided by the redis.cnrm.cloud.google.com group. +type RedisV1alpha1Client struct { + restClient rest.Interface +} + +func (c *RedisV1alpha1Client) RedisClusters(namespace string) RedisClusterInterface { + return newRedisClusters(c, namespace) +} + +// NewForConfig creates a new RedisV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*RedisV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RedisV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RedisV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &RedisV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new RedisV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RedisV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RedisV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *RedisV1alpha1Client { + return &RedisV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RedisV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/rediscluster.go b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/rediscluster.go new file mode 100644 index 0000000000..7672bfe4d1 --- /dev/null +++ b/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1/rediscluster.go @@ -0,0 +1,198 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// *** DISCLAIMER *** +// Config Connector's go-client for CRDs is currently in ALPHA, which means +// that future versions of the go-client may include breaking changes. +// Please try it out and give us feedback! + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/redis/v1alpha1" + scheme "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// RedisClustersGetter has a method to return a RedisClusterInterface. +// A group's client should implement this interface. +type RedisClustersGetter interface { + RedisClusters(namespace string) RedisClusterInterface +} + +// RedisClusterInterface has methods to work with RedisCluster resources. 
+type RedisClusterInterface interface { + Create(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.CreateOptions) (*v1alpha1.RedisCluster, error) + Update(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (*v1alpha1.RedisCluster, error) + UpdateStatus(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (*v1alpha1.RedisCluster, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RedisCluster, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RedisClusterList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisCluster, err error) + RedisClusterExpansion +} + +// redisClusters implements RedisClusterInterface +type redisClusters struct { + client rest.Interface + ns string +} + +// newRedisClusters returns a RedisClusters +func newRedisClusters(c *RedisV1alpha1Client, namespace string) *redisClusters { + return &redisClusters{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the redisCluster, and returns the corresponding redisCluster object, and an error if there is any. +func (c *redisClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RedisCluster, err error) { + result = &v1alpha1.RedisCluster{} + err = c.client.Get(). + Namespace(c.ns). + Resource("redisclusters"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RedisClusters that match those selectors. +func (c *redisClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RedisClusterList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RedisClusterList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("redisclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested redisClusters. +func (c *redisClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("redisclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a redisCluster and creates it. Returns the server's representation of the redisCluster, and an error, if there is any. +func (c *redisClusters) Create(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.CreateOptions) (result *v1alpha1.RedisCluster, err error) { + result = &v1alpha1.RedisCluster{} + err = c.client.Post(). + Namespace(c.ns). + Resource("redisclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(redisCluster). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a redisCluster and updates it. 
Returns the server's representation of the redisCluster, and an error, if there is any. +func (c *redisClusters) Update(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (result *v1alpha1.RedisCluster, err error) { + result = &v1alpha1.RedisCluster{} + err = c.client.Put(). + Namespace(c.ns). + Resource("redisclusters"). + Name(redisCluster.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(redisCluster). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *redisClusters) UpdateStatus(ctx context.Context, redisCluster *v1alpha1.RedisCluster, opts v1.UpdateOptions) (result *v1alpha1.RedisCluster, err error) { + result = &v1alpha1.RedisCluster{} + err = c.client.Put(). + Namespace(c.ns). + Resource("redisclusters"). + Name(redisCluster.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(redisCluster). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the redisCluster and deletes it. Returns an error if one occurs. +func (c *redisClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("redisclusters"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *redisClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("redisclusters"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched redisCluster. +func (c *redisClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisCluster, err error) { + result = &v1alpha1.RedisCluster{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("redisclusters"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/controller/direct/maputils.go b/pkg/controller/direct/maputils.go index d11713407b..eb105a0bb8 100644 --- a/pkg/controller/direct/maputils.go +++ b/pkg/controller/direct/maputils.go @@ -17,6 +17,7 @@ package direct import ( "errors" "fmt" + "runtime" "strings" "time" @@ -34,6 +35,18 @@ func (c *MapContext) Errorf(msg string, args ...interface{}) { c.errs = append(c.errs, fmt.Errorf(msg, args...)) } +func (c *MapContext) NotImplemented() { + functionName := "?" + + pc, _, _, _ := runtime.Caller(1) + fn := runtime.FuncForPC(pc) + if fn != nil { + functionName = fn.Name() + } + + c.Errorf("function %q not implemented", functionName) +} + func (c *MapContext) Err() error { return errors.Join(c.errs...) 
} @@ -43,6 +56,32 @@ type ProtoEnum interface { Descriptor() protoreflect.EnumDescriptor } +func Slice_ToProto[T, U any](mapCtx *MapContext, in []T, mapper func(mapCtx *MapContext, in *T) *U) []*U { + if in == nil { + return nil + } + + outSlice := make([]*U, 0, len(in)) + for _, inItem := range in { + outItem := mapper(mapCtx, &inItem) + outSlice = append(outSlice, outItem) + } + return outSlice +} + +func Slice_FromProto[T, U any](mapCtx *MapContext, in []*T, mapper func(mapCtx *MapContext, in *T) *U) []U { + if in == nil { + return nil + } + + outSlice := make([]U, 0, len(in)) + for _, inItem := range in { + outItem := mapper(mapCtx, inItem) + outSlice = append(outSlice, *outItem) + } + return outSlice +} + func Enum_ToProto[U ProtoEnum](mapCtx *MapContext, in *string) U { var defaultU U descriptor := defaultU.Descriptor() diff --git a/pkg/controller/direct/redis/cluster/mapper.generated.go b/pkg/controller/direct/redis/cluster/mapper.generated.go new file mode 100644 index 0000000000..aa20d4d71b --- /dev/null +++ b/pkg/controller/direct/redis/cluster/mapper.generated.go @@ -0,0 +1,382 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + pb "cloud.google.com/go/redis/cluster/apiv1/clusterpb" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/redis/v1alpha1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" +) + +func CertificateAuthority_FromProto(mapCtx *direct.MapContext, in *pb.CertificateAuthority) *krm.CertificateAuthority { + if in == nil { + return nil + } + out := &krm.CertificateAuthority{} + out.ManagedServerCa = CertificateAuthority_ManagedCertificateAuthority_FromProto(mapCtx, in.GetManagedServerCa()) + out.Name = direct.LazyPtr(in.GetName()) + return out +} +func CertificateAuthority_ToProto(mapCtx *direct.MapContext, in *krm.CertificateAuthority) *pb.CertificateAuthority { + if in == nil { + return nil + } + out := &pb.CertificateAuthority{} + if oneof := CertificateAuthority_ManagedCertificateAuthority_ToProto(mapCtx, in.ManagedServerCa); oneof != nil { + out.ServerCa = &pb.CertificateAuthority_ManagedServerCa{ManagedServerCa: oneof} + } + out.Name = direct.ValueOf(in.Name) + return out +} +func CertificateAuthority_ManagedCertificateAuthority_FromProto(mapCtx *direct.MapContext, in *pb.CertificateAuthority_ManagedCertificateAuthority) *krm.CertificateAuthority_ManagedCertificateAuthority { + if in == nil { + return nil + } + out := &krm.CertificateAuthority_ManagedCertificateAuthority{} + out.CaCerts = direct.Slice_FromProto(mapCtx, in.CaCerts, CertificateAuthority_ManagedCertificateAuthority_CertChain_FromProto) + return out +} +func CertificateAuthority_ManagedCertificateAuthority_ToProto(mapCtx *direct.MapContext, in *krm.CertificateAuthority_ManagedCertificateAuthority) *pb.CertificateAuthority_ManagedCertificateAuthority { + if in == nil { + return nil + } + out := &pb.CertificateAuthority_ManagedCertificateAuthority{} + out.CaCerts = 
direct.Slice_ToProto(mapCtx, in.CaCerts, CertificateAuthority_ManagedCertificateAuthority_CertChain_ToProto) + return out +} +func CertificateAuthority_ManagedCertificateAuthority_CertChain_FromProto(mapCtx *direct.MapContext, in *pb.CertificateAuthority_ManagedCertificateAuthority_CertChain) *krm.CertificateAuthority_ManagedCertificateAuthority_CertChain { + if in == nil { + return nil + } + out := &krm.CertificateAuthority_ManagedCertificateAuthority_CertChain{} + out.Certificates = in.Certificates + return out +} +func CertificateAuthority_ManagedCertificateAuthority_CertChain_ToProto(mapCtx *direct.MapContext, in *krm.CertificateAuthority_ManagedCertificateAuthority_CertChain) *pb.CertificateAuthority_ManagedCertificateAuthority_CertChain { + if in == nil { + return nil + } + out := &pb.CertificateAuthority_ManagedCertificateAuthority_CertChain{} + out.Certificates = in.Certificates + return out +} +func Cluster_FromProto(mapCtx *direct.MapContext, in *pb.Cluster) *krm.Cluster { + if in == nil { + return nil + } + out := &krm.Cluster{} + out.Name = direct.LazyPtr(in.GetName()) + out.CreateTime = Cluster_CreateTime_FromProto(mapCtx, in.GetCreateTime()) + out.State = direct.Enum_FromProto(mapCtx, in.State) + out.Uid = direct.LazyPtr(in.GetUid()) + out.ReplicaCount = in.ReplicaCount + out.AuthorizationMode = direct.Enum_FromProto(mapCtx, in.AuthorizationMode) + out.TransitEncryptionMode = direct.Enum_FromProto(mapCtx, in.TransitEncryptionMode) + out.SizeGb = in.SizeGb + out.ShardCount = in.ShardCount + out.PscConfigs = direct.Slice_FromProto(mapCtx, in.PscConfigs, PscConfig_FromProto) + out.DiscoveryEndpoints = direct.Slice_FromProto(mapCtx, in.DiscoveryEndpoints, DiscoveryEndpoint_FromProto) + out.PscConnections = direct.Slice_FromProto(mapCtx, in.PscConnections, PscConnection_FromProto) + out.StateInfo = Cluster_StateInfo_FromProto(mapCtx, in.GetStateInfo()) + out.NodeType = direct.Enum_FromProto(mapCtx, in.NodeType) + out.PersistenceConfig = ClusterPersistenceConfig_FromProto(mapCtx, in.GetPersistenceConfig()) + out.RedisConfigs = in.RedisConfigs + out.PreciseSizeGb = in.PreciseSizeGb + out.ZoneDistributionConfig = ZoneDistributionConfig_FromProto(mapCtx, in.GetZoneDistributionConfig()) + out.DeletionProtectionEnabled = in.DeletionProtectionEnabled + return out +} +func Cluster_ToProto(mapCtx *direct.MapContext, in *krm.Cluster) *pb.Cluster { + if in == nil { + return nil + } + out := &pb.Cluster{} + out.Name = direct.ValueOf(in.Name) + out.CreateTime = Cluster_CreateTime_ToProto(mapCtx, in.CreateTime) + out.State = direct.Enum_ToProto[pb.Cluster_State](mapCtx, in.State) + out.Uid = direct.ValueOf(in.Uid) + out.ReplicaCount = in.ReplicaCount + out.AuthorizationMode = direct.Enum_ToProto[pb.AuthorizationMode](mapCtx, in.AuthorizationMode) + out.TransitEncryptionMode = direct.Enum_ToProto[pb.TransitEncryptionMode](mapCtx, in.TransitEncryptionMode) + out.SizeGb = in.SizeGb + out.ShardCount = in.ShardCount + out.PscConfigs = direct.Slice_ToProto(mapCtx, in.PscConfigs, PscConfig_ToProto) + out.DiscoveryEndpoints = direct.Slice_ToProto(mapCtx, in.DiscoveryEndpoints, DiscoveryEndpoint_ToProto) + out.PscConnections = direct.Slice_ToProto(mapCtx, in.PscConnections, PscConnection_ToProto) + out.StateInfo = Cluster_StateInfo_ToProto(mapCtx, in.StateInfo) + out.NodeType = direct.Enum_ToProto[pb.NodeType](mapCtx, in.NodeType) + out.PersistenceConfig = ClusterPersistenceConfig_ToProto(mapCtx, in.PersistenceConfig) + out.RedisConfigs = in.RedisConfigs + out.PreciseSizeGb = in.PreciseSizeGb + 
out.ZoneDistributionConfig = ZoneDistributionConfig_ToProto(mapCtx, in.ZoneDistributionConfig) + out.DeletionProtectionEnabled = in.DeletionProtectionEnabled + return out +} +func ClusterPersistenceConfig_FromProto(mapCtx *direct.MapContext, in *pb.ClusterPersistenceConfig) *krm.ClusterPersistenceConfig { + if in == nil { + return nil + } + out := &krm.ClusterPersistenceConfig{} + out.Mode = direct.Enum_FromProto(mapCtx, in.Mode) + out.RdbConfig = ClusterPersistenceConfig_RDBConfig_FromProto(mapCtx, in.GetRdbConfig()) + out.AofConfig = ClusterPersistenceConfig_AOFConfig_FromProto(mapCtx, in.GetAofConfig()) + return out +} +func ClusterPersistenceConfig_ToProto(mapCtx *direct.MapContext, in *krm.ClusterPersistenceConfig) *pb.ClusterPersistenceConfig { + if in == nil { + return nil + } + out := &pb.ClusterPersistenceConfig{} + out.Mode = direct.Enum_ToProto[pb.ClusterPersistenceConfig_PersistenceMode](mapCtx, in.Mode) + out.RdbConfig = ClusterPersistenceConfig_RDBConfig_ToProto(mapCtx, in.RdbConfig) + out.AofConfig = ClusterPersistenceConfig_AOFConfig_ToProto(mapCtx, in.AofConfig) + return out +} +func ClusterPersistenceConfig_AOFConfig_FromProto(mapCtx *direct.MapContext, in *pb.ClusterPersistenceConfig_AOFConfig) *krm.ClusterPersistenceConfig_AOFConfig { + if in == nil { + return nil + } + out := &krm.ClusterPersistenceConfig_AOFConfig{} + out.AppendFsync = direct.Enum_FromProto(mapCtx, in.AppendFsync) + return out +} +func ClusterPersistenceConfig_AOFConfig_ToProto(mapCtx *direct.MapContext, in *krm.ClusterPersistenceConfig_AOFConfig) *pb.ClusterPersistenceConfig_AOFConfig { + if in == nil { + return nil + } + out := &pb.ClusterPersistenceConfig_AOFConfig{} + out.AppendFsync = direct.Enum_ToProto[pb.ClusterPersistenceConfig_AOFConfig_AppendFsync](mapCtx, in.AppendFsync) + return out +} +func ClusterPersistenceConfig_RDBConfig_FromProto(mapCtx *direct.MapContext, in *pb.ClusterPersistenceConfig_RDBConfig) *krm.ClusterPersistenceConfig_RDBConfig { + if in == nil { + return nil + } + out := &krm.ClusterPersistenceConfig_RDBConfig{} + out.RdbSnapshotPeriod = direct.Enum_FromProto(mapCtx, in.RdbSnapshotPeriod) + out.RdbSnapshotStartTime = RDBConfig_RdbSnapshotStartTime_FromProto(mapCtx, in.GetRdbSnapshotStartTime()) + return out +} +func ClusterPersistenceConfig_RDBConfig_ToProto(mapCtx *direct.MapContext, in *krm.ClusterPersistenceConfig_RDBConfig) *pb.ClusterPersistenceConfig_RDBConfig { + if in == nil { + return nil + } + out := &pb.ClusterPersistenceConfig_RDBConfig{} + out.RdbSnapshotPeriod = direct.Enum_ToProto[pb.ClusterPersistenceConfig_RDBConfig_SnapshotPeriod](mapCtx, in.RdbSnapshotPeriod) + out.RdbSnapshotStartTime = RDBConfig_RdbSnapshotStartTime_ToProto(mapCtx, in.RdbSnapshotStartTime) + return out +} +func Cluster_StateInfo_FromProto(mapCtx *direct.MapContext, in *pb.Cluster_StateInfo) *krm.Cluster_StateInfo { + if in == nil { + return nil + } + out := &krm.Cluster_StateInfo{} + out.UpdateInfo = Cluster_StateInfo_UpdateInfo_FromProto(mapCtx, in.GetUpdateInfo()) + return out +} +func Cluster_StateInfo_ToProto(mapCtx *direct.MapContext, in *krm.Cluster_StateInfo) *pb.Cluster_StateInfo { + if in == nil { + return nil + } + out := &pb.Cluster_StateInfo{} + if oneof := Cluster_StateInfo_UpdateInfo_ToProto(mapCtx, in.UpdateInfo); oneof != nil { + out.Info = &pb.Cluster_StateInfo_UpdateInfo_{UpdateInfo: oneof} + } + return out +} +func Cluster_StateInfo_UpdateInfo_FromProto(mapCtx *direct.MapContext, in *pb.Cluster_StateInfo_UpdateInfo) *krm.Cluster_StateInfo_UpdateInfo { + if in == nil 
{ + return nil + } + out := &krm.Cluster_StateInfo_UpdateInfo{} + out.TargetShardCount = in.TargetShardCount + out.TargetReplicaCount = in.TargetReplicaCount + return out +} +func Cluster_StateInfo_UpdateInfo_ToProto(mapCtx *direct.MapContext, in *krm.Cluster_StateInfo_UpdateInfo) *pb.Cluster_StateInfo_UpdateInfo { + if in == nil { + return nil + } + out := &pb.Cluster_StateInfo_UpdateInfo{} + out.TargetShardCount = in.TargetShardCount + out.TargetReplicaCount = in.TargetReplicaCount + return out +} +func DiscoveryEndpoint_FromProto(mapCtx *direct.MapContext, in *pb.DiscoveryEndpoint) *krm.DiscoveryEndpoint { + if in == nil { + return nil + } + out := &krm.DiscoveryEndpoint{} + out.Address = direct.LazyPtr(in.GetAddress()) + out.Port = direct.LazyPtr(in.GetPort()) + out.PscConfig = PscConfig_FromProto(mapCtx, in.GetPscConfig()) + return out +} +func DiscoveryEndpoint_ToProto(mapCtx *direct.MapContext, in *krm.DiscoveryEndpoint) *pb.DiscoveryEndpoint { + if in == nil { + return nil + } + out := &pb.DiscoveryEndpoint{} + out.Address = direct.ValueOf(in.Address) + out.Port = direct.ValueOf(in.Port) + out.PscConfig = PscConfig_ToProto(mapCtx, in.PscConfig) + return out +} +func PscConnection_FromProto(mapCtx *direct.MapContext, in *pb.PscConnection) *krm.PscConnection { + if in == nil { + return nil + } + out := &krm.PscConnection{} + out.PscConnectionID = direct.LazyPtr(in.GetPscConnectionId()) + out.Address = direct.LazyPtr(in.GetAddress()) + out.ForwardingRule = direct.LazyPtr(in.GetForwardingRule()) + out.ProjectID = direct.LazyPtr(in.GetProjectId()) + out.Network = direct.LazyPtr(in.GetNetwork()) + return out +} +func PscConnection_ToProto(mapCtx *direct.MapContext, in *krm.PscConnection) *pb.PscConnection { + if in == nil { + return nil + } + out := &pb.PscConnection{} + out.PscConnectionId = direct.ValueOf(in.PscConnectionID) + out.Address = direct.ValueOf(in.Address) + out.ForwardingRule = direct.ValueOf(in.ForwardingRule) + out.ProjectId = direct.ValueOf(in.ProjectID) + out.Network = direct.ValueOf(in.Network) + return out +} +func RedisClusterObservedState_FromProto(mapCtx *direct.MapContext, in *pb.Cluster) *krm.RedisClusterObservedState { + if in == nil { + return nil + } + out := &krm.RedisClusterObservedState{} + // MISSING: Name + out.CreateTime = Cluster_CreateTime_FromProto(mapCtx, in.GetCreateTime()) + out.State = direct.Enum_FromProto(mapCtx, in.State) + out.Uid = direct.LazyPtr(in.GetUid()) + // MISSING: ReplicaCount + // MISSING: AuthorizationMode + // MISSING: TransitEncryptionMode + out.SizeGb = in.SizeGb + // MISSING: ShardCount + // MISSING: PscConfigs + out.DiscoveryEndpoints = direct.Slice_FromProto(mapCtx, in.DiscoveryEndpoints, DiscoveryEndpoint_FromProto) + out.PscConnections = direct.Slice_FromProto(mapCtx, in.PscConnections, PscConnection_FromProto) + out.StateInfo = Cluster_StateInfo_FromProto(mapCtx, in.GetStateInfo()) + // MISSING: NodeType + // MISSING: PersistenceConfig + // MISSING: RedisConfigs + out.PreciseSizeGb = in.PreciseSizeGb + // MISSING: ZoneDistributionConfig + // MISSING: DeletionProtectionEnabled + return out +} +func RedisClusterObservedState_ToProto(mapCtx *direct.MapContext, in *krm.RedisClusterObservedState) *pb.Cluster { + if in == nil { + return nil + } + out := &pb.Cluster{} + // MISSING: Name + out.CreateTime = Cluster_CreateTime_ToProto(mapCtx, in.CreateTime) + out.State = direct.Enum_ToProto[pb.Cluster_State](mapCtx, in.State) + out.Uid = direct.ValueOf(in.Uid) + // MISSING: ReplicaCount + // MISSING: AuthorizationMode + // MISSING: 
TransitEncryptionMode + out.SizeGb = in.SizeGb + // MISSING: ShardCount + // MISSING: PscConfigs + out.DiscoveryEndpoints = direct.Slice_ToProto(mapCtx, in.DiscoveryEndpoints, DiscoveryEndpoint_ToProto) + out.PscConnections = direct.Slice_ToProto(mapCtx, in.PscConnections, PscConnection_ToProto) + out.StateInfo = Cluster_StateInfo_ToProto(mapCtx, in.StateInfo) + // MISSING: NodeType + // MISSING: PersistenceConfig + // MISSING: RedisConfigs + out.PreciseSizeGb = in.PreciseSizeGb + // MISSING: ZoneDistributionConfig + // MISSING: DeletionProtectionEnabled + return out +} +func RedisClusterSpec_FromProto(mapCtx *direct.MapContext, in *pb.Cluster) *krm.RedisClusterSpec { + if in == nil { + return nil + } + out := &krm.RedisClusterSpec{} + // MISSING: Name + // MISSING: CreateTime + // MISSING: State + // MISSING: Uid + out.ReplicaCount = in.ReplicaCount + out.AuthorizationMode = direct.Enum_FromProto(mapCtx, in.AuthorizationMode) + out.TransitEncryptionMode = direct.Enum_FromProto(mapCtx, in.TransitEncryptionMode) + // MISSING: SizeGb + out.ShardCount = in.ShardCount + out.PscConfigs = direct.Slice_FromProto(mapCtx, in.PscConfigs, PscConfig_FromProto) + // MISSING: DiscoveryEndpoints + // MISSING: PscConnections + // MISSING: StateInfo + out.NodeType = direct.Enum_FromProto(mapCtx, in.NodeType) + out.PersistenceConfig = ClusterPersistenceConfig_FromProto(mapCtx, in.GetPersistenceConfig()) + out.RedisConfigs = in.RedisConfigs + // MISSING: PreciseSizeGb + out.ZoneDistributionConfig = ZoneDistributionConfig_FromProto(mapCtx, in.GetZoneDistributionConfig()) + out.DeletionProtectionEnabled = in.DeletionProtectionEnabled + return out +} +func RedisClusterSpec_ToProto(mapCtx *direct.MapContext, in *krm.RedisClusterSpec) *pb.Cluster { + if in == nil { + return nil + } + out := &pb.Cluster{} + // MISSING: Name + // MISSING: CreateTime + // MISSING: State + // MISSING: Uid + out.ReplicaCount = in.ReplicaCount + out.AuthorizationMode = direct.Enum_ToProto[pb.AuthorizationMode](mapCtx, in.AuthorizationMode) + out.TransitEncryptionMode = direct.Enum_ToProto[pb.TransitEncryptionMode](mapCtx, in.TransitEncryptionMode) + // MISSING: SizeGb + out.ShardCount = in.ShardCount + out.PscConfigs = direct.Slice_ToProto(mapCtx, in.PscConfigs, PscConfig_ToProto) + // MISSING: DiscoveryEndpoints + // MISSING: PscConnections + // MISSING: StateInfo + out.NodeType = direct.Enum_ToProto[pb.NodeType](mapCtx, in.NodeType) + out.PersistenceConfig = ClusterPersistenceConfig_ToProto(mapCtx, in.PersistenceConfig) + out.RedisConfigs = in.RedisConfigs + // MISSING: PreciseSizeGb + out.ZoneDistributionConfig = ZoneDistributionConfig_ToProto(mapCtx, in.ZoneDistributionConfig) + out.DeletionProtectionEnabled = in.DeletionProtectionEnabled + return out +} +func ZoneDistributionConfig_FromProto(mapCtx *direct.MapContext, in *pb.ZoneDistributionConfig) *krm.ZoneDistributionConfig { + if in == nil { + return nil + } + out := &krm.ZoneDistributionConfig{} + out.Mode = direct.Enum_FromProto(mapCtx, in.Mode) + out.Zone = direct.LazyPtr(in.GetZone()) + return out +} +func ZoneDistributionConfig_ToProto(mapCtx *direct.MapContext, in *krm.ZoneDistributionConfig) *pb.ZoneDistributionConfig { + if in == nil { + return nil + } + out := &pb.ZoneDistributionConfig{} + out.Mode = direct.Enum_ToProto[pb.ZoneDistributionConfig_ZoneDistributionMode](mapCtx, in.Mode) + out.Zone = direct.ValueOf(in.Zone) + return out +} diff --git a/pkg/controller/direct/redis/cluster/mapper.go b/pkg/controller/direct/redis/cluster/mapper.go new file mode 100644 index 
0000000000..bd4ad4da66 --- /dev/null +++ b/pkg/controller/direct/redis/cluster/mapper.go @@ -0,0 +1,79 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cluster + +import ( + "time" + + pb "cloud.google.com/go/redis/cluster/apiv1/clusterpb" + krm "github.com/GoogleCloudPlatform/k8s-config-connector/apis/redis/v1alpha1" + refs "github.com/GoogleCloudPlatform/k8s-config-connector/apis/refs/v1beta1" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func Cluster_CreateTime_FromProto(mapCtx *direct.MapContext, in *timestamppb.Timestamp) *string { + return Timestamp_FromProto(mapCtx, in) +} +func Cluster_CreateTime_ToProto(mapCtx *direct.MapContext, in *string) *timestamppb.Timestamp { + return Timestamp_ToProto(mapCtx, in) +} +func RDBConfig_RdbSnapshotStartTime_FromProto(mapCtx *direct.MapContext, in *timestamppb.Timestamp) *string { + return Timestamp_FromProto(mapCtx, in) +} +func RDBConfig_RdbSnapshotStartTime_ToProto(mapCtx *direct.MapContext, in *string) *timestamppb.Timestamp { + return Timestamp_ToProto(mapCtx, in) +} + +func Timestamp_FromProto(mapCtx *direct.MapContext, in *timestamppb.Timestamp) *string { + if in == nil { + return nil + } + t := in.AsTime() + s := t.Format(time.RFC3339Nano) + return &s +} +func Timestamp_ToProto(mapCtx *direct.MapContext, in *string) *timestamppb.Timestamp { + if in == nil { + return nil + } + t, err := time.Parse(time.RFC3339Nano, *in) + if err != nil { + mapCtx.Errorf("invalid timestamp %q", *in) + } + ts := timestamppb.New(t) + return ts +} + +func PscConfig_FromProto(mapCtx *direct.MapContext, in *pb.PscConfig) *krm.PscConfig { + if in == nil { + return nil + } + out := &krm.PscConfig{} + if in.Network != "" { + out.NetworkRef = &refs.ComputeNetworkRef{External: in.Network} + } + return out +} +func PscConfig_ToProto(mapCtx *direct.MapContext, in *krm.PscConfig) *pb.PscConfig { + if in == nil { + return nil + } + out := &pb.PscConfig{} + if in.NetworkRef != nil { + out.Network = in.NetworkRef.External + } + return out +} diff --git a/pkg/controller/direct/redis/cluster/roundtrip_test.go b/pkg/controller/direct/redis/cluster/roundtrip_test.go new file mode 100644 index 0000000000..f3b51862c8 --- /dev/null +++ b/pkg/controller/direct/redis/cluster/roundtrip_test.go @@ -0,0 +1,148 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "math/rand" + "testing" + + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/controller/direct" + "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/test/fuzz" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/testing/protocmp" + "k8s.io/apimachinery/pkg/util/sets" + + pb "cloud.google.com/go/redis/cluster/apiv1/clusterpb" +) + +func FuzzRedisClusterSpec(f *testing.F) { + f.Fuzz(func(t *testing.T, seed int64) { + randStream := rand.New(rand.NewSource(seed)) + + p1 := &pb.Cluster{} + fuzz.FillWithRandom(t, randStream, p1) + + // We don't expect output fields to round-trip + outputFields := sets.New(".etag") + + // A few fields are not implemented yet in KRM, don't test them + unimplementedFields := sets.New( + ".name", + ".labels", + ) + + // Status fields + unimplementedFields.Insert(".discovery_endpoints") + unimplementedFields.Insert(".uid") + unimplementedFields.Insert(".precise_size_gb") + unimplementedFields.Insert(".size_gb") + unimplementedFields.Insert(".state_info") + unimplementedFields.Insert(".create_time") + unimplementedFields.Insert(".state") + unimplementedFields.Insert(".psc_connections") + + // Remove any output only or known-unimplemented fields + clearFields := &fuzz.ClearFields{ + Paths: unimplementedFields.Union(outputFields), + } + fuzz.Visit("", p1.ProtoReflect(), nil, clearFields) + + r := &fuzz.ReplaceFields{} + r.Func = func(path string, val protoreflect.Value) (protoreflect.Value, bool) { + // TODO: Any values that must follow a pattern + return protoreflect.Value{}, false + } + fuzz.Visit("", p1.ProtoReflect(), nil, r) + + ctx := &direct.MapContext{} + k := RedisClusterSpec_FromProto(ctx, p1) + if ctx.Err() != nil { + t.Fatalf("error mapping from proto to krm: %v", ctx.Err()) + } + + p2 := RedisClusterSpec_ToProto(ctx, k) + if ctx.Err() != nil { + t.Fatalf("error mapping from krm to proto: %v", ctx.Err()) + } + + if diff := cmp.Diff(p1, p2, protocmp.Transform()); diff != "" { + t.Logf("p1 = %v", prototext.Format(p1)) + t.Logf("p2 = %v", prototext.Format(p2)) + t.Errorf("roundtrip failed; diff:\n%s", diff) + } + }) +} + +func FuzzRedisClusterObservedState(f *testing.F) { + f.Fuzz(func(t *testing.T, seed int64) { + randStream := rand.New(rand.NewSource(seed)) + + p1 := &pb.Cluster{} + fuzz.FillWithRandom(t, randStream, p1) + + // We don't expect output fields to round-trip + outputFields := sets.New(".etag") + + // A few fields are not implemented yet in KRM, don't test them + unimplementedFields := sets.New( + ".name", + ".labels", + ) + + // Spec fields + unimplementedFields.Insert(".persistence_config") + unimplementedFields.Insert(".psc_configs") + unimplementedFields.Insert(".zone_distribution_config") + unimplementedFields.Insert(".redis_configs") + unimplementedFields.Insert(".shard_count") + unimplementedFields.Insert(".transit_encryption_mode") + unimplementedFields.Insert(".node_type") + unimplementedFields.Insert(".authorization_mode") + unimplementedFields.Insert(".replica_count") + unimplementedFields.Insert(".deletion_protection_enabled") + + // Remove any output only or known-unimplemented fields + clearFields := &fuzz.ClearFields{ + Paths: unimplementedFields.Union(outputFields), + } + fuzz.Visit("", p1.ProtoReflect(), nil, clearFields) + + r := &fuzz.ReplaceFields{} + r.Func = func(path string, val protoreflect.Value) (protoreflect.Value, bool) { + // TODO: Any values that must follow a 
pattern + return protoreflect.Value{}, false + } + fuzz.Visit("", p1.ProtoReflect(), nil, r) + + ctx := &direct.MapContext{} + k := RedisClusterObservedState_FromProto(ctx, p1) + if ctx.Err() != nil { + t.Fatalf("error mapping from proto to krm: %v", ctx.Err()) + } + + p2 := RedisClusterObservedState_ToProto(ctx, k) + if ctx.Err() != nil { + t.Fatalf("error mapping from krm to proto: %v", ctx.Err()) + } + + if diff := cmp.Diff(p1, p2, protocmp.Transform()); diff != "" { + t.Logf("p1 = %v", prototext.Format(p1)) + t.Logf("p2 = %v", prototext.Format(p2)) + t.Errorf("roundtrip failed; diff:\n%s", diff) + } + }) +} diff --git a/pkg/test/fuzz/generate.go b/pkg/test/fuzz/generate.go index f291cdcc6a..91f55aed8e 100644 --- a/pkg/test/fuzz/generate.go +++ b/pkg/test/fuzz/generate.go @@ -46,6 +46,20 @@ func fillWithRandom0(t *testing.T, randStream *rand.Rand, msg protoreflect.Messa return } + if string(descriptor.FullName()) == "google.protobuf.Timestamp" { + count := randStream.Intn(10) + // Bias to zero + if count > 4 { + return + } + // Generate a "reasonable" timestamp + seconds := (1900 * 365 * 24 * 60 * 60) + randStream.Intn(400*365*24*60*60) + nanos := randStream.Intn(1000000000) + msg.Set(descriptor.Fields().ByName("seconds"), protoreflect.ValueOfInt64(int64(seconds))) + msg.Set(descriptor.Fields().ByName("nanos"), protoreflect.ValueOfInt32(int32(nanos))) + return + } + fields := descriptor.Fields() + n := fields.Len() + for i := 0; i < n; i++ {
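
For anyone who wants to exercise the new RedisCluster go-client end to end, the following is a minimal consumer-side sketch, not part of this patch: it assumes a reachable kubeconfig at the default location and uses an illustrative "default" namespace, relying only on the NewForConfig constructor from redis_client.go and the RedisClusters typed interface from rediscluster.go above.

package main

import (
	"context"
	"fmt"

	redisv1alpha1 "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/client/clientset/versioned/typed/redis/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// NewForConfig is the constructor added in redis_client.go.
	client, err := redisv1alpha1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List RedisCluster objects via the typed interface from rediscluster.go.
	// The "default" namespace is illustrative.
	clusters, err := client.RedisClusters("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, rc := range clusters.Items {
		fmt.Println(rc.Name)
	}
}

As the generated disclaimer notes, the go-client for CRDs is still ALPHA, so this surface may change in future releases.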